void send_strided (pami_context_t context, size_t dispatchc, size_t dispatchs,
                   A1PAMI_Info_t * ainfo, size_t hdrsize, char * buffer,
                   pami_endpoint_t target)
{
  int i = 0;
  _PackState pstate;

  pami_send_t parameters;
  parameters.send.header.iov_base = ainfo;
  parameters.send.header.iov_len  = hdrsize;
  parameters.send.data.iov_base   = NULL;
  parameters.events.cookie        = (void *) &_send_active;
  parameters.events.local_fn      = NULL;
  parameters.events.remote_fn     = NULL;
  parameters.send.dest            = target;
  memset(&parameters.send.hints, 0, sizeof(parameters.send.hints));

  if (ainfo->count[0] >= 512)
  {
    /* Large chunks: send each stride as a separate message. */
    parameters.send.dispatch = dispatchc;

    for (i = 0; i < ainfo->count[1]; ++i)
    {
      parameters.send.data.iov_base = buffer + ainfo->sstride * i;
      parameters.send.data.iov_len  = ainfo->count[0];

      /* Only the last message triggers the remote completion callback. */
      if (i == ainfo->count[1] - 1)
        parameters.events.remote_fn = cb_done;

      RC( PAMI_Send (context, &parameters) );
    }

    while (_send_active)
      PAMI_Context_advance (context, POLL_CNT);
    _send_active = 1;
  }
  else
  {
    /* Small chunks: pack into a contiguous buffer and send once. */
    parameters.send.dispatch = dispatchs;

    int bytes = ainfo->count[0] * ainfo->count[1];
    void *packbuf = malloc (bytes);
    packStrided (ainfo, packbuf, buffer);

    pstate.buffer  = packbuf;
    pstate.counter = &_send_active;

    parameters.send.data.iov_base = packbuf;
    parameters.send.data.iov_len  = bytes;
    parameters.events.remote_fn   = cb_pack_done;
    parameters.events.cookie      = (void *) &pstate;

    RC( PAMI_Send (context, &parameters) );

    while (_send_active)
      PAMI_Context_advance (context, POLL_CNT);
    _send_active = 1;
  }
}
void _rmw (pami_context_t context, void * src_ptr_in, void * src_ptr_out,
           void * target_ptr, pami_atomic_t op, pami_type_t type,
           pami_endpoint_t target)
{
  pami_rmw_t rmw;
  memset(&rmw, 0, sizeof(pami_rmw_t));

  rmw.cookie  = (void *) &_send_active;
  rmw.done_fn = cb_done;

  int test = 0;
  rmw.local     = src_ptr_out;
  rmw.remote    = target_ptr;
  rmw.value     = src_ptr_in;
  rmw.test      = &test;
  rmw.type      = type;
  rmw.operation = op;
  rmw.dest      = target;

#if ENABLE_PROGRESS
  pami_work_t work;
  PAMI_Context_post (context, &work, (pami_work_function)PAMI_Rmw, (void *)&rmw);
  while (_send_active == 1);
#else
  RC( PAMI_Rmw (context, &rmw) );
  while (_send_active)
    PAMI_Context_advance (context, POLL_CNT);
#endif
  _send_active = 1;
}
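/*
 * The helpers above lean on a handful of symbols defined elsewhere in this
 * file (cb_done, _send_active, RC, POLL_CNT). A minimal sketch of their
 * assumed shape follows -- the real definitions may differ; it is shown only
 * so the completion pattern (flag starts at 1, callback decrements it to 0,
 * advance until 0, reset to 1) is explicit. Assumes <stdio.h> and <stdlib.h>.
 */
#define POLL_CNT 100                      /* advance poll count (assumed)  */
static volatile size_t _send_active = 1;  /* completion flag, reset to 1   */

/* Abort-on-error wrapper around PAMI calls (assumed shape). */
#define RC(stmt)                                               \
  do {                                                         \
    pami_result_t rc_ = (stmt);                                \
    if (rc_ != PAMI_SUCCESS) {                                 \
      fprintf (stderr, "%s failed, rc = %d\n", #stmt, rc_);    \
      abort ();                                                \
    }                                                          \
  } while (0)

/* Generic completion callback: decrement the counter passed as 'cookie'. */
static void cb_done (pami_context_t context, void * cookie, pami_result_t result)
{
  volatile size_t * active = (volatile size_t *) cookie;
  --(*active);
}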
/* Return 1 if the entire message is ready, 0 if not. */
int optiq_pami_transport_recv(struct optiq_transport *self, struct optiq_message *message)
{
#ifdef __bgq__
    struct optiq_pami_transport *pami_transport =
        (struct optiq_pami_transport *) optiq_transport_get_concrete_transport(self);

    PAMI_Context_advance (pami_transport->context, 100);

    for (int i = 0; i < pami_transport->local_messages.size(); i++) {
        struct optiq_message *instant = pami_transport->local_messages.back();

        if (instant->header.job_id == message->header.job_id) {
#ifdef DEBUG
            printf("Rank %d copies %d bytes of data to offset %d\n",
                   pami_transport->rank, instant->length,
                   instant->header.original_offset);
#endif
            memcpy((void *) &message->buffer[instant->header.original_offset],
                   (const void *) instant->buffer, instant->length);

            message->recv_length += instant->length;
            pami_transport->local_messages.pop_back();
            (*pami_transport->avail_recv_messages).push_back(instant);

            if (message->recv_length == instant->header.original_length) {
                /* Entire message received; notify the involved tasks. */
                optiq_notify_job_done(self, message->header.job_id,
                                      &pami_transport->involved_task_ids);
                return 1;
            }
        }
    }
#endif
    return 0;
}
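/*
 * For reference, the message fields dereferenced by
 * optiq_pami_transport_recv() above. This is a hypothetical sketch of only
 * the members the function touches; the real optiq structs live elsewhere
 * and carry more state.
 */
struct optiq_message_header_sketch {
    int job_id;           /* job this fragment belongs to                 */
    int flow_id;          /* flow id, used by the send-side accounting    */
    int original_offset;  /* where the fragment lands in the dest buffer  */
    int original_length;  /* total length of the fully assembled message  */
};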
void send_contig (pami_context_t context, size_t dispatch, void * metadata,
                  size_t hdrsize, char * buffer, size_t sndlen,
                  pami_endpoint_t target)
{
  pami_send_t parameters;
  parameters.send.dispatch        = dispatch;
  parameters.send.header.iov_base = metadata;
  parameters.send.header.iov_len  = hdrsize;
  parameters.send.data.iov_base   = buffer;
  parameters.send.data.iov_len    = sndlen;
  parameters.events.cookie        = (void *) &_send_active;
  parameters.events.local_fn      = NULL;
  parameters.events.remote_fn     = cb_done;
  parameters.send.dest            = target;
  memset(&parameters.send.hints, 0, sizeof(parameters.send.hints));

#if ENABLE_PROGRESS
  pami_work_t work;
  PAMI_Context_post (context, &work, (pami_work_function)PAMI_Send, (void *)&parameters);
  while (_send_active == 1);
#else
  RC( PAMI_Send (context, &parameters) );
  while (_send_active)
    PAMI_Context_advance (context, POLL_CNT);
#endif
  _send_active = 1;
}
void get_contig (pami_context_t context, void * lbuf, void * rbuf,
                 void * lbase, void * rbase, pami_memregion_t * lmr,
                 pami_memregion_t * rmr, size_t sndlen, pami_endpoint_t target)
{
  pami_rget_simple_t rget;
  rget.rma.dest                    = target;
  rget.rma.bytes                   = sndlen;
  rget.rma.cookie                  = (void *) &_send_active;
  rget.rma.done_fn                 = cb_done;
  rget.rma.hints.buffer_registered = PAMI_HINT_ENABLE;
  rget.rma.hints.use_rdma          = PAMI_HINT_ENABLE;
  rget.rdma.local.mr               = lmr;
  rget.rdma.local.offset           = (size_t)lbuf - (size_t)lbase;
  rget.rdma.remote.mr              = rmr;
  rget.rdma.remote.offset          = (size_t)rbuf - (size_t)rbase;

  assert (_send_active == 1);

#if ENABLE_PROGRESS
  pami_work_t work;
  PAMI_Context_post (context, &work, (pami_work_function)PAMI_Rget, (void *)&rget);
  while (_send_active == 1);
#else
  RC( PAMI_Rget (context, &rget) );
  while (_send_active)
    PAMI_Context_advance (context, POLL_CNT);
#endif
  _send_active = 1;
}
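/*
 * Hypothetical call site for get_contig(). Both sides must register their
 * buffers with PAMI_Memregion_create() and exchange the resulting memregions
 * beforehand (e.g. via send_contig()); 'remote_mr' and 'remote_buf' below
 * stand in for values received from the peer. Sketch only -- not part of
 * the original test.
 */
void example_get (pami_context_t context, pami_endpoint_t peer,
                  pami_memregion_t * remote_mr, void * remote_buf)
{
  static char local_buf[4096];
  size_t registered = 0;
  pami_memregion_t local_mr;

  RC( PAMI_Memregion_create (context, local_buf, sizeof(local_buf),
                             &registered, &local_mr) );

  /* Pull sizeof(local_buf) bytes from the peer; the base pointers equal the
   * buffer pointers here, so both rdma offsets are zero. */
  get_contig (context, local_buf, remote_buf, local_buf, remote_buf,
              &local_mr, remote_mr, sizeof(local_buf), peer);

  PAMI_Memregion_destroy (context, &local_mr);
}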
bool optiq_pami_transport_forward_test(struct optiq_transport *self)
{
#ifdef __bgq__
    struct optiq_pami_transport *pami_transport =
        (struct optiq_pami_transport *) optiq_transport_get_concrete_transport(self);

    PAMI_Context_advance (pami_transport->context, 100);

    if (pami_transport->involved_job_ids.size() > 0) {
        return false;
    }
#endif
    return true;
}
/**
 * \brief Blocking 'world geometry' barrier
 *
 * This function is provided for illustrative purposes only. One would never
 * include the retrieval of the world geometry and the query of the barrier
 * algorithm in performance-critical code.
 *
 * \param[in] client  The PAMI client; needed to obtain the geometry
 * \param[in] context The PAMI context; used for the barrier communication
 */
void simple_barrier (pami_client_t client, pami_context_t context)
{
  pami_result_t result;
  pami_geometry_t world_geometry;
  pami_xfer_t xfer;
  pami_algorithm_t algorithm;
  pami_metadata_t metadata;

  /* Retrieve the PAMI 'world' geometry */
  result = PAMI_ERROR;
  result = PAMI_Geometry_world (client, &world_geometry);
  assert (result == PAMI_SUCCESS);

  /* Query the 'always works' barrier algorithm in the geometry */
  result = PAMI_ERROR;
  result = PAMI_Geometry_algorithms_query (world_geometry, PAMI_XFER_BARRIER,
                                           &algorithm, &metadata, 1,
                                           NULL, NULL, 0);
  assert (result == PAMI_SUCCESS);

  /* Set up the barrier */
  volatile unsigned active = 1;
  xfer.cb_done   = simple_barrier_decrement;
  xfer.cookie    = (void *) &active;
  xfer.algorithm = algorithm;

  /* Issue the barrier collective */
  result = PAMI_ERROR;
  result = PAMI_Collective (context, &xfer);
  assert (result == PAMI_SUCCESS);

  /* Advance until the barrier has completed */
  while (active)
  {
    result = PAMI_ERROR;
    result = PAMI_Context_advance (context, 1);
    assert (result == PAMI_SUCCESS);
  }

  return;
}
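/*
 * simple_barrier() references a completion callback that is not shown in
 * this excerpt. Its assumed shape, matching the pami_event_function
 * signature and the 'volatile unsigned active' cookie above:
 */
static void simple_barrier_decrement (pami_context_t context, void * cookie,
                                      pami_result_t result)
{
  volatile unsigned * active = (volatile unsigned *) cookie;
  --(*active);
}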
void put_contig (pami_context_t context, void * srcbuf, void * dstbuf,
                 void * src_base, void * dst_base, pami_memregion_t * src_mr,
                 pami_memregion_t * dst_mr, size_t sndlen, pami_endpoint_t target)
{
  pami_rput_simple_t rput;
  rput.rma.dest                    = target;
  rput.rma.hints.buffer_registered = PAMI_HINT_ENABLE;
  rput.rma.hints.use_rdma          = PAMI_HINT_ENABLE;
  rput.rma.bytes                   = sndlen;
  rput.rma.cookie                  = (void *) &_send_active;
  rput.rma.done_fn                 = NULL;  /* wait on remote completion only */
  rput.rdma.local.mr               = src_mr;
  rput.rdma.local.offset           = (size_t)srcbuf - (size_t)src_base;
  rput.rdma.remote.mr              = dst_mr;
  rput.rdma.remote.offset          = (size_t)dstbuf - (size_t)dst_base;
  rput.put.rdone_fn                = cb_done;

#if ENABLE_PROGRESS
  pami_work_t work;
  PAMI_Context_post (context, &work, (pami_work_function)PAMI_Rput, (void *)&rput);
  while (_send_active == 1);
#else
  RC( PAMI_Rput (context, &rput) );
  while (_send_active)
    PAMI_Context_advance (context, POLL_CNT);
#endif
  _send_active = 1;
}
bool optiq_pami_transport_test(struct optiq_transport *self, struct optiq_job *job)
{
    bool isDone = true;
#ifdef __bgq__
    struct optiq_pami_transport *pami_transport =
        (struct optiq_pami_transport *) optiq_transport_get_concrete_transport(self);

    PAMI_Context_advance (pami_transport->context, 100);

    optiq_send_cookie *send_cookie;

    /* Return cookies and messages to the available queues, adding the bytes
     * sent to the corresponding flow's tally. */
    while (pami_transport->in_use_send_cookies.size() > 0) {
        send_cookie = pami_transport->in_use_send_cookies.back();

        for (int i = 0; i < job->flows.size(); i++) {
            if (send_cookie->message->header.flow_id == job->flows[i].id) {
                job->flows[i].sent_bytes += send_cookie->message->length;
            }
        }

        pami_transport->in_use_send_cookies.pop_back();
        (*(pami_transport->avail_send_messages)).push_back(send_cookie->message);
        pami_transport->avail_send_cookies.push_back(send_cookie);
    }

    /* Check whether every flow is done. */
    for (int i = 0; i < job->flows.size(); i++) {
        if (job->flows[i].registered_bytes != job->flows[i].sent_bytes) {
            isDone = false;
#ifdef DEBUG
            printf("Rank %d at flow_id %d sent %d out of %d bytes\n",
                   self->rank, job->flows[i].id, job->flows[i].sent_bytes,
                   job->flows[i].message->length);
#endif
        }
    }
#endif
    return isDone;
}
int main (int argc, char ** argv)
{
  volatile size_t _rts_active = 1;
  volatile size_t _ack_active = 1;

  memset(&null_send_hint, 0, sizeof(null_send_hint));

  pami_client_t client;
  pami_context_t context[2];

  char cl_string[] = "TEST";
  pami_result_t result = PAMI_ERROR;

  result = PAMI_Client_create (cl_string, &client, NULL, 0);
  if (result != PAMI_SUCCESS)
  {
    fprintf (stderr, "Error. Unable to create pami client. result = %d\n", result);
    return 1;
  }

#ifdef TEST_CROSSTALK
  size_t num = 2;
#else
  size_t num = 1;
#endif
  result = PAMI_Context_createv(client, NULL, 0, context, num);
  if (result != PAMI_SUCCESS)
  {
    fprintf (stderr, "Error. Unable to create pami context(s). result = %d\n", result);
    return 1;
  }

  pami_configuration_t configuration;

  configuration.name = PAMI_CLIENT_TASK_ID;
  result = PAMI_Client_query(client, &configuration, 1);
  if (result != PAMI_SUCCESS)
  {
    fprintf (stderr, "Error. Unable to query configuration (%d). result = %d\n",
             configuration.name, result);
    return 1;
  }
  pami_task_t task_id = configuration.value.intval;
  fprintf (stderr, "My task id = %d\n", task_id);

  configuration.name = PAMI_CLIENT_NUM_TASKS;
  result = PAMI_Client_query(client, &configuration, 1);
  if (result != PAMI_SUCCESS)
  {
    fprintf (stderr, "Error. Unable to query configuration (%d). result = %d\n",
             configuration.name, result);
    return 1;
  }
  size_t num_tasks = configuration.value.intval;
  fprintf (stderr, "Number of tasks = %zu\n", num_tasks);

  if (num_tasks < 2)
  {
    fprintf (stderr, "Error. This test requires at least 2 tasks. Number of tasks in this job: %zu\n",
             num_tasks);
    return 1;
  }

  pami_dispatch_hint_t options = {};
#ifdef USE_SHMEM_OPTION
  options.use_shmem = PAMI_HINT_ENABLE;
  fprintf (stderr, "##########################################\n");
  fprintf (stderr, "shared memory optimizations forced ON\n");
  fprintf (stderr, "##########################################\n");
#elif defined(NO_SHMEM_OPTION)
  options.use_shmem = PAMI_HINT_DISABLE;
  fprintf (stderr, "##########################################\n");
  fprintf (stderr, "shared memory optimizations forced OFF\n");
  fprintf (stderr, "##########################################\n");
#endif

  size_t i = 0;
#ifdef TEST_CROSSTALK
  for (i = 0; i < 2; i++)
#endif
  {
    pami_dispatch_callback_function fn;

    fprintf (stderr, "Before PAMI_Dispatch_set(%d) .. &_rts_active = %p, _rts_active = %zu\n",
             DISPATCH_ID_RTS, &_rts_active, _rts_active);
    fn.p2p = dispatch_rts;
    result = PAMI_Dispatch_set (context[i], DISPATCH_ID_RTS, fn,
                                (void *) &_rts_active, options);
    if (result != PAMI_SUCCESS)
    {
      fprintf (stderr, "Error. Unable to register pami dispatch. result = %d\n", result);
      return 1;
    }

    fprintf (stderr, "Before PAMI_Dispatch_set(%d) .. &_ack_active = %p, _ack_active = %zu\n",
             DISPATCH_ID_ACK, &_ack_active, _ack_active);
    fn.p2p = dispatch_ack;
    result = PAMI_Dispatch_set (context[i], DISPATCH_ID_ACK, fn,
                                (void *) &_ack_active, options);
    if (result != PAMI_SUCCESS)
    {
      fprintf (stderr, "Error. Unable to register pami dispatch. result = %d\n", result);
      return 1;
    }
  }

  if (task_id == 0)
  {
    pami_send_immediate_t parameters;
#ifdef TEST_CROSSTALK
    fprintf (stdout, "PAMI_Rget('simple') functional test [crosstalk]\n");
    fprintf (stdout, "\n");
    PAMI_Endpoint_create (client, num_tasks - 1, 1, &parameters.dest);
#else
    fprintf (stdout, "PAMI_Rget('simple') functional test\n");
    fprintf (stdout, "\n");
    PAMI_Endpoint_create (client, num_tasks - 1, 0, &parameters.dest);
#endif

    /* Allocate some memory from the heap. */
    void * send_buffer = malloc (BUFFERSIZE);

    /* Initialize the memory for validation. */
    initialize_data ((uint32_t *)send_buffer, BUFFERSIZE, 0);
    print_data (send_buffer, BUFFERSIZE);

    /* Send an 'rts' message to the target task and provide the memory region */
    rts_info_t rts_info;
    PAMI_Endpoint_create (client, 0, 0, &rts_info.origin);
    rts_info.bytes = BUFFERSIZE;

    /* Create a memory region for this memory buffer */
    size_t bytes = 0;
    pami_result_t pami_rc = PAMI_Memregion_create (context[0], send_buffer,
                                                   BUFFERSIZE, &bytes,
                                                   &(rts_info.memregion));
    if (PAMI_SUCCESS != pami_rc)
    {
      fprintf (stderr, "PAMI_Memregion_create failed with rc = %d\n", pami_rc);
      exit(1);
    }

    parameters.dispatch        = DISPATCH_ID_RTS;
    parameters.header.iov_base = &rts_info;
    parameters.header.iov_len  = sizeof(rts_info_t);
    parameters.data.iov_base   = NULL;
    parameters.data.iov_len    = 0;

    fprintf (stderr, "Before PAMI_Send_immediate()\n");
    PAMI_Send_immediate (context[0], &parameters);

    /* wait for the 'ack' */
    fprintf (stderr, "Wait for 'ack', _ack_active = %zu\n", _ack_active);
    while (_ack_active != 0)
    {
      result = PAMI_Context_advance (context[0], 100);
      if (result != PAMI_SUCCESS && result != PAMI_EAGAIN)
      {
        fprintf (stderr, "Error. Unable to advance pami context. result = %d\n", result);
        return 1;
      }
    }

    /* Destroy the local memory region */
    PAMI_Memregion_destroy (context[0], &(rts_info.memregion));
    free (send_buffer);

    switch (_ack_status)
    {
      case 0:  fprintf (stdout, "Test PASSED\n");                 break;
      case 1:  fprintf (stdout, "Test FAILED (rget error)\n");    break;
      case 2:  fprintf (stdout, "Test FAILED (data error)\n");    break;
      default: fprintf (stdout, "Test FAILED (unknown error)\n"); break;
    }
  }
  else if (task_id == num_tasks - 1)
  {
#ifdef TEST_CROSSTALK
    size_t contextid = 1;
#else
    size_t contextid = 0;
#endif
    /* wait for the 'rts' */
    fprintf (stderr, "Wait for 'rts', _rts_active = %zu, contextid = %zu\n",
             _rts_active, contextid);
    while (_rts_active != 0)
    {
      result = PAMI_Context_advance (context[contextid], 100);
      if (result != PAMI_SUCCESS && result != PAMI_EAGAIN)
      {
        fprintf (stderr, "Error. Unable to advance pami context. result = %d\n", result);
        return 1;
      }
    }
  }

  fprintf (stderr, "Test completed .. cleanup\n");

  result = PAMI_Context_destroyv (context, num);
  if (result != PAMI_SUCCESS)
  {
    fprintf (stderr, "Error. Unable to destroy pami context. result = %d\n", result);
    return 1;
  }

  result = PAMI_Client_destroy(&client);
  if (result != PAMI_SUCCESS)
  {
    fprintf (stderr, "Error. Unable to destroy pami client. result = %d\n", result);
    return 1;
  }

  return 0;
}
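/*
 * Wire format assumed for the 'rts' header used above; a sketch of only the
 * fields the test references (the real typedef lives elsewhere in this test
 * and may carry more, e.g. an 'ack' status field):
 */
typedef struct
{
  pami_endpoint_t  origin;     /* endpoint to send the 'ack' back to */
  size_t           bytes;      /* size of the exposed send buffer    */
  pami_memregion_t memregion;  /* registered region for the rget     */
} rts_info_t;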
int main(int argc, char*argv[])
{
  pami_client_t client;
  pami_context_t *context;
  pami_task_t task_id, local_task_id = 0, task_zero = 0;
  size_t num_tasks;
  pami_geometry_t world_geometry;

  /* Barrier variables */
  size_t barrier_num_algorithm[2];
  pami_algorithm_t *bar_always_works_algo = NULL;
  pami_metadata_t *bar_always_works_md = NULL;
  pami_algorithm_t *bar_must_query_algo = NULL;
  pami_metadata_t *bar_must_query_md = NULL;
  pami_xfer_type_t barrier_xfer = PAMI_XFER_BARRIER;
  volatile unsigned bar_poll_flag = 0;
  volatile unsigned newbar_poll_flag = 0;

  /* Alltoallv variables */
  size_t alltoallv_num_algorithm[2];
  pami_algorithm_t *alltoallv_always_works_algo = NULL;
  pami_metadata_t *alltoallv_always_works_md = NULL;
  pami_algorithm_t *alltoallv_must_query_algo = NULL;
  pami_metadata_t *alltoallv_must_query_md = NULL;
  pami_xfer_type_t alltoallv_xfer = PAMI_XFER_ALLTOALLV;
  volatile unsigned alltoallv_poll_flag = 0;

  int nalg = 0;
  double ti, tf, usec;
  pami_xfer_t barrier;
  pami_xfer_t alltoallv;

  /* Process environment variables and setup globals */
  setup_env();
  assert(gNum_contexts > 0);
  context = (pami_context_t*)malloc(sizeof(pami_context_t) * gNum_contexts);

  /* Initialize PAMI */
  int rc = pami_init(&client,        /* Client             */
                     context,        /* Context            */
                     NULL,           /* Clientname=default */
                     &gNum_contexts, /* gNum_contexts      */
                     NULL,           /* null configuration */
                     0,              /* no configuration   */
                     &task_id,       /* task id            */
                     &num_tasks);    /* number of tasks    */
  if (rc == 1)
    return 1;

  if (num_tasks == 1)
  {
    fprintf(stderr, "No subcomms on 1 node\n");
    return 0;
  }

  assert(task_id >= 0);
  assert(task_id < num_tasks);

  /* Allocate buffer(s) */
  int err = 0;
  void* sbuf = NULL;
  err = posix_memalign((void**) &sbuf, 128, (gMax_byte_count * num_tasks) + gBuffer_offset);
  assert(err == 0);
  sbuf = (char*)sbuf + gBuffer_offset;

  void* rbuf = NULL;
  err = posix_memalign((void**) &rbuf, 128, (gMax_byte_count * num_tasks) + gBuffer_offset);
  assert(err == 0);
  rbuf = (char*)rbuf + gBuffer_offset;

  sndlens = (size_t*) malloc(num_tasks * sizeof(size_t));
  assert(sndlens);
  sdispls = (size_t*) malloc(num_tasks * sizeof(size_t));
  assert(sdispls);
  rcvlens = (size_t*) malloc(num_tasks * sizeof(size_t));
  assert(rcvlens);
  rdispls = (size_t*) malloc(num_tasks * sizeof(size_t));
  assert(rdispls);

  /* Query the world geometry for barrier algorithms */
  rc |= query_geometry_world(client, context[0], &world_geometry, barrier_xfer,
                             barrier_num_algorithm, &bar_always_works_algo,
                             &bar_always_works_md, &bar_must_query_algo,
                             &bar_must_query_md);
  if (rc == 1)
    return 1;

  /* Set up world barrier */
  barrier.cb_done   = cb_done;
  barrier.cookie    = (void*) &bar_poll_flag;
  barrier.algorithm = bar_always_works_algo[0];

  /* Create the subgeometry */
  pami_geometry_range_t *range;
  int rangecount;
  pami_geometry_t newgeometry;
  size_t newbar_num_algo[2];
  pami_algorithm_t *newbar_algo = NULL;
  pami_metadata_t *newbar_md = NULL;
  pami_algorithm_t *q_newbar_algo = NULL;
  pami_metadata_t *q_newbar_md = NULL;
  pami_xfer_t newbarrier;
  size_t set[2];
  int id;

  range = (pami_geometry_range_t *)malloc(((num_tasks + 1) / 2) * sizeof(pami_geometry_range_t));

  int unused_non_task_zero[2];
  get_split_method(&num_tasks, task_id, &rangecount, range, &local_task_id,
                   set, &id, &task_zero, unused_non_task_zero);

  unsigned iContext = 0;
  for (; iContext < gNum_contexts; ++iContext)
  {
    if (task_id == task_zero)
      printf("# Context: %u\n", iContext);

    /* Delay the task_zero task and emulate that it is doing "other" message
       passing. This causes the geometry_create requests from other nodes to
       arrive unexpected when creating parentless geometries; parented
       geometries are unaffected. */
    if (task_id == task_zero)
    {
      delayTest(1);
      unsigned ii = 0;
      for (; ii < gNum_contexts; ++ii)
        PAMI_Context_advance (context[ii], 1000);
    }

    rc |= create_and_query_geometry(client, context[0], context[iContext],
                                    gParentless ? PAMI_GEOMETRY_NULL : world_geometry,
                                    &newgeometry, range, rangecount,
                                    id + iContext, /* Unique id for each context */
                                    barrier_xfer, newbar_num_algo,
                                    &newbar_algo, &newbar_md,
                                    &q_newbar_algo, &q_newbar_md);
    if (rc == 1)
      return 1;

    /* Query the sub geometry for alltoallv algorithms */
    rc |= query_geometry(client, context[iContext], newgeometry, alltoallv_xfer,
                         alltoallv_num_algorithm, &alltoallv_always_works_algo,
                         &alltoallv_always_works_md, &alltoallv_must_query_algo,
                         &alltoallv_must_query_md);
    if (rc == 1)
      return 1;

    /* Set up sub geometry barrier */
    newbarrier.cb_done   = cb_done;
    newbarrier.cookie    = (void*) &newbar_poll_flag;
    newbarrier.algorithm = newbar_algo[0];

    for (nalg = 0; nalg < alltoallv_num_algorithm[0]; nalg++)
    {
      alltoallv.cb_done   = cb_done;
      alltoallv.cookie    = (void*) &alltoallv_poll_flag;
      alltoallv.algorithm = alltoallv_always_works_algo[nalg];
      alltoallv.cmd.xfer_alltoallv.sndbuf      = sbuf;
      alltoallv.cmd.xfer_alltoallv.stype       = PAMI_TYPE_BYTE;
      alltoallv.cmd.xfer_alltoallv.stypecounts = sndlens;
      alltoallv.cmd.xfer_alltoallv.sdispls     = sdispls;
      alltoallv.cmd.xfer_alltoallv.rcvbuf      = rbuf;
      alltoallv.cmd.xfer_alltoallv.rtype       = PAMI_TYPE_BYTE;
      alltoallv.cmd.xfer_alltoallv.rtypecounts = rcvlens;
      alltoallv.cmd.xfer_alltoallv.rdispls     = rdispls;

      int k;
      gProtocolName = alltoallv_always_works_md[nalg].name;

      for (k = 1; k >= 0; k--)
      {
        if (set[k])
        {
          if (task_id == task_zero)
          {
            printf("# Alltoallv Bandwidth Test(size:%zu) -- context = %d, task_zero = %d, protocol: %s\n",
                   num_tasks, iContext, task_zero, gProtocolName);
            printf("# Size(bytes)  iterations    bytes/sec      usec\n");
            printf("# -----------  -----------  -----------  ---------\n");
          }

          if (((strstr(alltoallv_always_works_md[nalg].name, gSelected) == NULL) && gSelector) ||
              ((strstr(alltoallv_always_works_md[nalg].name, gSelected) != NULL) && !gSelector))
            continue;

          blocking_coll(context[iContext], &newbarrier, &newbar_poll_flag);

          int i, j;
          for (i = gMin_byte_count; i <= gMax_byte_count; i *= 2)
          {
            size_t dataSent = i;
            int niter;

            if (dataSent < CUTOFF)
              niter = gNiterlat;
            else
              niter = NITERBW;

            for (j = 0; j < num_tasks; j++)
            {
              sndlens[j] = rcvlens[j] = i;
              sdispls[j] = rdispls[j] = i * j;
              alltoallv_initialize_bufs(sbuf, rbuf, sndlens, rcvlens,
                                        sdispls, rdispls, j);
            }

            blocking_coll(context[iContext], &newbarrier, &newbar_poll_flag);

            /* Warmup */
            blocking_coll(context[iContext], &alltoallv, &alltoallv_poll_flag);
            blocking_coll(context[iContext], &newbarrier, &newbar_poll_flag);

            ti = timer();
            for (j = 0; j < niter; j++)
            {
              blocking_coll(context[iContext], &alltoallv, &alltoallv_poll_flag);
            }
            tf = timer();
            blocking_coll(context[iContext], &newbarrier, &newbar_poll_flag);

            int rc_check;
            rc |= rc_check = alltoallv_check_rcvbuf(rbuf, rcvlens, rdispls,
                                                    num_tasks, local_task_id);
            if (rc_check)
              fprintf(stderr, "%s FAILED validation\n", gProtocolName);

            usec = (tf - ti) / (double)niter;
            if (task_id == task_zero)
            {
              printf("  %11lld %16d %14.1f %12.2f\n",
                     (long long)dataSent, niter,
                     (double)1e6 * (double)dataSent / (double)usec, usec);
              fflush(stdout);
            }
          }
        }

        blocking_coll(context[iContext], &newbarrier, &newbar_poll_flag);
        fflush(stderr);
      }
    }

    /* We aren't testing the world barrier itself, so use context 0. */
    blocking_coll(context[0], &barrier, &bar_poll_flag);

    free(bar_always_works_algo);
    free(bar_always_works_md);
    free(bar_must_query_algo);
    free(bar_must_query_md);
    free(alltoallv_always_works_algo);
    free(alltoallv_always_works_md);
    free(alltoallv_must_query_algo);
    free(alltoallv_must_query_md);
  } /* for (iContext = 0; iContext < gNum_contexts; ++iContext) */

  sbuf = (char*)sbuf - gBuffer_offset;
  free(sbuf);
  rbuf = (char*)rbuf - gBuffer_offset;
  free(rbuf);
  free(sndlens);
  free(sdispls);
  free(rcvlens);
  free(rdispls);

  rc |= pami_shutdown(&client, context, &gNum_contexts);
  return rc;
}
int main(int argc, char ** argv)
{
    pami_client_t client;
    pami_context_t context;
    pami_configuration_t pami_config;
    pami_geometry_t world_geo;
    size_t barrier_alg_num[2];
    pami_algorithm_t* bar_always_works_algo = NULL;
    pami_metadata_t* bar_always_works_md = NULL;
    pami_algorithm_t* bar_must_query_algo = NULL;
    pami_metadata_t* bar_must_query_md = NULL;
    pami_xfer_t barrier;
    int my_id;
    volatile int is_fence_done = 0;
    volatile int is_barrier_done = 0;

    /* create the PAMI client */
    RC( PAMI_Client_create("TEST", &client, NULL, 0) );
    DBG_FPRINTF((stderr, "Client created successfully at 0x%p\n", client));

    /* create the PAMI context */
    RC( PAMI_Context_createv(client, NULL, 0, &context, 1) );
    DBG_FPRINTF((stderr, "Context created successfully at 0x%p\n", context));

    /* query my task id */
    bzero(&pami_config, sizeof(pami_configuration_t));
    pami_config.name = PAMI_CLIENT_TASK_ID;
    RC( PAMI_Client_query(client, &pami_config, 1) );
    my_id = pami_config.value.intval;
    DBG_FPRINTF((stderr, "My task id is %d\n", my_id));

    /* get the world geometry */
    RC( PAMI_Geometry_world(client, &world_geo) );
    DBG_FPRINTF((stderr, "World geometry is at 0x%p\n", world_geo));

    /* query the number of barrier algorithms */
    RC( PAMI_Geometry_algorithms_num(world_geo, PAMI_XFER_BARRIER, barrier_alg_num) );
    DBG_FPRINTF((stderr, "%zu-%zu algorithms are available for barrier op\n",
                 barrier_alg_num[0], barrier_alg_num[1]));
    if (barrier_alg_num[0] <= 0)
    {
        fprintf (stderr, "Error. No (%zu) algorithm is available for barrier op\n",
                 barrier_alg_num[0]);
        return 1;
    }

    /* query the barrier algorithm list */
    bar_always_works_algo = (pami_algorithm_t*)malloc(sizeof(pami_algorithm_t)*barrier_alg_num[0]);
    bar_always_works_md   = (pami_metadata_t*)malloc(sizeof(pami_metadata_t)*barrier_alg_num[0]);
    bar_must_query_algo   = (pami_algorithm_t*)malloc(sizeof(pami_algorithm_t)*barrier_alg_num[1]);
    bar_must_query_md     = (pami_metadata_t*)malloc(sizeof(pami_metadata_t)*barrier_alg_num[1]);

    RC( PAMI_Geometry_algorithms_query(world_geo, PAMI_XFER_BARRIER,
                                       bar_always_works_algo, bar_always_works_md,
                                       barrier_alg_num[0],
                                       bar_must_query_algo, bar_must_query_md,
                                       barrier_alg_num[1]) );
    DBG_FPRINTF((stderr, "Algorithm [%s] at 0x%p will be used for barrier op\n",
                 bar_always_works_md[0].name, bar_always_works_algo[0]));

    /* begin PAMI fence */
    RC( PAMI_Fence_begin(context) );
    DBG_FPRINTF((stderr, "PAMI fence begins\n"));

    /* ------------------------------------------------------------------ */

    pami_extension_t extension;
    const char ext_name[] = "EXT_hfi_extension";
    const char sym_name[] = "hfi_remote_update";
    hfi_remote_update_fn remote_update = NULL;
    hfi_remote_update_info_t remote_info;
    pami_memregion_t mem_region;
    size_t mem_region_sz = 0;
    unsigned long long operand = 1234;
    unsigned long long orig_val = 0;
    int offset = (operand) % MAX_TABLE_SZ;

    /* initialize the table for the remote update operation */
    int i;
    for (i = 0; i < MAX_TABLE_SZ; i++)
    {
        table[i] = (unsigned long long) i;
    }
    orig_val = table[offset];

    /* open the PAMI extension */
    RC( PAMI_Extension_open (client, ext_name, &extension) );
    DBG_FPRINTF((stderr, "Opened %s successfully.\n", ext_name));

    /* load the PAMI extension function */
    remote_update = (hfi_remote_update_fn)
        PAMI_Extension_symbol (extension, sym_name);
    if (remote_update == (void *)NULL)
    {
        fprintf (stderr, "Error. Failed to load %s function in %s\n",
                 sym_name, ext_name);
        return 1;
    }
    else
    {
        DBG_FPRINTF((stderr, "Loaded function %s in %s successfully.\n",
                     sym_name, ext_name));
    }

    /* create a memory region for the remote update operation */
    RC( PAMI_Memregion_create(context, table,
                              MAX_TABLE_SZ*sizeof(unsigned long long),
                              &mem_region_sz, &mem_region) );
    DBG_FPRINTF((stderr, "%zu-byte PAMI memory region created successfully.\n",
                 mem_region_sz));

    /* perform a PAMI barrier */
    is_barrier_done = 0;
    barrier.cb_done   = barrier_done;
    barrier.cookie    = (void*)&is_barrier_done;
    barrier.algorithm = bar_always_works_algo[0];
    RC( PAMI_Collective(context, &barrier) );
    DBG_FPRINTF((stderr, "PAMI barrier op invoked successfully.\n"));
    while (is_barrier_done == 0)
        PAMI_Context_advance(context, 1000);
    DBG_FPRINTF((stderr, "PAMI barrier op finished successfully.\n"));

    RC( PAMI_Context_lock(context) );

    /* prepare the remote update info */
    remote_info.dest           = my_id ^ 1;
    remote_info.op             = 0; /* op_add */
    remote_info.atomic_operand = operand;
    remote_info.dest_buf       = (unsigned long long)(&(table[offset]));

    /* invoke the remote update PAMI extension function */
    RC( remote_update(context, 1, &remote_info) );
    DBG_FPRINTF((stderr, "Function %s invoked successfully.\n", sym_name));

    RC( PAMI_Context_unlock(context) );

    /* perform a PAMI fence */
    is_fence_done = 0;
    RC( PAMI_Fence_all(context, fence_done, (void*)&is_fence_done) );
    DBG_FPRINTF((stderr, "PAMI_Fence_all invoked successfully.\n"));
    while (is_fence_done == 0)
        PAMI_Context_advance(context, 1000);
    DBG_FPRINTF((stderr, "PAMI_Fence_all finished successfully.\n"));

    /* perform a PAMI barrier */
    is_barrier_done = 0;
    barrier.cb_done   = barrier_done;
    barrier.cookie    = (void*)&is_barrier_done;
    barrier.algorithm = bar_always_works_algo[0];
    RC( PAMI_Collective(context, &barrier) );
    DBG_FPRINTF((stderr, "PAMI barrier op invoked successfully.\n"));
    while (is_barrier_done == 0)
        PAMI_Context_advance(context, 1000);
    DBG_FPRINTF((stderr, "PAMI barrier op finished successfully.\n"));

    /* verify the data after the remote update operation */
    if (table[offset] != orig_val + operand)
    {
        printf("Data verification at offset %d with operand %llu failed: "
               "[%llu expected with %llu updated]\n",
               offset, operand, orig_val + operand, table[offset]);
    }
    else
    {
        printf("Data verification at offset %d with operand %llu passed: "
               "[%llu expected with %llu updated].\n",
               offset, operand, orig_val + operand, table[offset]);
    }

    /* destroy the memory region after the remote update operation */
    RC( PAMI_Memregion_destroy(context, &mem_region) );
    DBG_FPRINTF((stderr, "PAMI memory region removed successfully.\n"));

    /* close the PAMI extension */
    RC( PAMI_Extension_close (extension) );
    DBG_FPRINTF((stderr, "Closed %s successfully.\n", ext_name));

    /* ------------------------------------------------------------------ */

    /* end PAMI fence */
    RC( PAMI_Fence_end(context) );
    DBG_FPRINTF((stderr, "PAMI fence ends\n"));

    /* destroy the PAMI context */
    RC( PAMI_Context_destroyv(&context, 1) );
    DBG_FPRINTF((stderr, "PAMI context destroyed successfully\n"));

    /* destroy the PAMI client */
    RC( PAMI_Client_destroy(&client) );
    DBG_FPRINTF((stderr, "PAMI client destroyed successfully\n"));

    return 0;
}
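/*
 * Globals and callbacks assumed by the HFI remote-update test above.
 * Minimal sketch -- the real definitions may differ; both callbacks match
 * the pami_event_function signature and simply flip the volatile flag
 * passed as the cookie.
 */
#define MAX_TABLE_SZ 4096  /* assumed table size */
static unsigned long long table[MAX_TABLE_SZ];

static void barrier_done (pami_context_t context, void * cookie,
                          pami_result_t result)
{
    *(volatile int *) cookie = 1;
}

static void fence_done (pami_context_t context, void * cookie,
                        pami_result_t result)
{
    *(volatile int *) cookie = 1;
}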
void test_fn (int argc, char * argv[], pami_client_t client, pami_context_t context[])
{
  int num_doubles = 16;

  if (argc > 1)
    num_doubles = atoi (argv[1]);

  size_t num_tasks = size (client);
  pami_task_t my_task_id = task (client);
  pami_task_t target_task_id = num_tasks - 1;
  pami_task_t origin_task_id = 0;

  /*
   * Allocate a 'window' of memory region information, one for each task in the
   * client. Only the 'local' memory region information for this task will
   * contain a valid data buffer. The memory region information is marked
   * 'active' when the memory regions are received from each remote task.
   */
  memregion_information_t * mr_info =
    (memregion_information_t *) malloc (sizeof(memregion_information_t) * num_tasks);
  unsigned i;
  for (i = 0; i < num_tasks; i++)
  {
    mr_info[i].data.iov_len  = 0;
    mr_info[i].data.iov_base = NULL;
    mr_info[i].active        = 0;
  }

  /*
   * Create a local memregion for each context.
   *
   * Note that both memregions will describe the same memory location. This is
   * necessary when writing portable, platform-independent code as the physical
   * hardware underlying the contexts may, or may not, require separate memory
   * pinning.
   */
  size_t actual_memregion_bytes = 0;
  mr_info[my_task_id].data.iov_base = malloc (sizeof(double) * num_doubles);
  mr_info[my_task_id].data.iov_len  = sizeof(double) * num_doubles;
  PAMI_Memregion_create (context[0],
                         mr_info[my_task_id].data.iov_base,
                         mr_info[my_task_id].data.iov_len,
                         & actual_memregion_bytes,
                         & mr_info[my_task_id].memregion[0]);
  PAMI_Memregion_create (context[1],
                         mr_info[my_task_id].data.iov_base,
                         mr_info[my_task_id].data.iov_len,
                         & actual_memregion_bytes,
                         & mr_info[my_task_id].memregion[1]);
  mr_info[my_task_id].active = 1;

  /*
   * Register the memory region exchange dispatch; only needed on the
   * first context of each task.
   */
  pami_dispatch_hint_t mr_hint = {0};
  pami_dispatch_callback_function mr_dispatch;
  mr_dispatch.p2p = exchange_memregion_recv_cb;
  PAMI_Dispatch_set (context[0], MEMREGION_EXCHANGE_DISPATCH_ID, mr_dispatch,
                     (void *) mr_info, mr_hint);

  accumulate_test_information_t test_info;

  test_info.data_buffer.iov_base = malloc (sizeof(double) * num_doubles);
  test_info.data_buffer.iov_len  = sizeof(double) * num_doubles;
  test_info.scalar = 1.2;

  test_info.data_fn[ACCUMULATE_TEST_SCALAR_SUM] = accumulate_scalar_sum_data_function;
  test_info.data_cookie[ACCUMULATE_TEST_SCALAR_SUM] = (void *) & test_info.scalar;

  test_info.data_fn[ACCUMULATE_TEST_VECTOR_SUM] = accumulate_vector_sum_data_function;
  test_info.data_cookie[ACCUMULATE_TEST_VECTOR_SUM] = malloc (sizeof(double) * num_doubles);

  test_info.data_fn[ACCUMULATE_TEST_SCALAR_SUBTRACT] = accumulate_scalar_subtract_data_function;
  test_info.data_cookie[ACCUMULATE_TEST_SCALAR_SUBTRACT] = (void *) & test_info.scalar;

  test_info.data_fn[ACCUMULATE_TEST_VECTOR_SUBTRACT] = accumulate_vector_subtract_data_function;
  test_info.data_cookie[ACCUMULATE_TEST_VECTOR_SUBTRACT] = malloc (sizeof(double) * num_doubles);

  test_info.data_fn[ACCUMULATE_TEST_VECTOR_MAX_SUM] = accumulate_vector_max_sum_data_function;
  test_info.data_cookie[ACCUMULATE_TEST_VECTOR_MAX_SUM] = malloc (sizeof(double) * num_doubles);

  test_info.data_fn[ACCUMULATE_TEST_VECTOR_MIN_SUM] = accumulate_vector_min_sum_data_function;
  test_info.data_cookie[ACCUMULATE_TEST_VECTOR_MIN_SUM] = malloc (sizeof(double) * num_doubles);

  /*
   * Register the accumulate dispatch; needed on both
   * contexts to enable "crosstalk".
   */
  pami_dispatch_hint_t acc_hint = {0};
  acc_hint.recv_immediate = PAMI_HINT_DISABLE;
  pami_dispatch_callback_function acc_dispatch;
  acc_dispatch.p2p = accumulate_test_recv_cb;
  PAMI_Dispatch_set (context[0], ACCUMULATE_TEST_DISPATCH_ID, acc_dispatch,
                     (void *) & test_info, acc_hint);
  PAMI_Dispatch_set (context[1], ACCUMULATE_TEST_DISPATCH_ID, acc_dispatch,
                     (void *) & test_info, acc_hint);

  simple_barrier(client, context[0]);

  /*
   * Exchange the memory regions.
   */
  volatile unsigned mr_exchange_active = 0;
  pami_send_t mr_exchange_parameters = {0};
  mr_exchange_parameters.send.dispatch        = MEMREGION_EXCHANGE_DISPATCH_ID;
  mr_exchange_parameters.send.header.iov_base = NULL;
  mr_exchange_parameters.send.header.iov_len  = 0;
  mr_exchange_parameters.send.data.iov_base   = (void *) mr_info[my_task_id].memregion;
  mr_exchange_parameters.send.data.iov_len    = sizeof(pami_memregion_t) * 2;
  mr_exchange_parameters.events.cookie        = (void *) & mr_exchange_active;
  mr_exchange_parameters.events.local_fn      = decrement;

  for (i = 0; i < num_tasks; i++)
  {
    if (i == my_task_id) continue;

    PAMI_Endpoint_create (client, i, 0, & mr_exchange_parameters.send.dest);
    mr_exchange_active++;
    PAMI_Send (context[0], & mr_exchange_parameters);
  }

  /*
   * Advance until the local memory regions have been sent and
   * all memory regions have been received.
   */
  unsigned num_memregions_active;
  do
  {
    num_memregions_active = 0;

    for (i = 0; i < num_tasks; i++)
      num_memregions_active += mr_info[i].active;

    PAMI_Context_advance (context[0], 1);
  }
  while (num_memregions_active < num_tasks);

  while (mr_exchange_active > 0)
    PAMI_Context_advance (context[0], 1);

#ifdef ASYNC_PROGRESS
  async_progress_t async_progress;
  async_progress_open (client, &async_progress);
  async_progress_enable (&async_progress, context[1]);
#endif

  if (my_task_id == target_task_id)
  {
    /*
     * This is the "passive target" task.
     */
#ifdef ASYNC_PROGRESS
    /*
     * Do "something" besides communication for a little bit.
     */
    sleep(1);
#else
    /*
     * Advance the second context for a little bit.
     */
    fprintf (stdout, "(%03d) spoofing async progress\n", __LINE__);

    for (i = 0; i < 10; i++)
    {
      fprintf (stdout, "(%03d) 'async progress context' advancing\n", __LINE__);
      PAMI_Context_advance (context[1], 100000);
      fprintf (stdout, "(%03d) 'async progress context' sleeping\n", __LINE__);
      sleep(1);
    }
#endif
  }
  else if (my_task_id == origin_task_id)
  {
    /*
     * This is the "active origin" task.
     */
    {
      /*
       * Use rdma put to initialize the remote buffer with the local data.
       */
      volatile unsigned rput_active = 1;

      pami_rput_simple_t rput_parameters = {0};
      PAMI_Endpoint_create (client, target_task_id, 1, & rput_parameters.rma.dest);
      rput_parameters.rma.bytes          = num_doubles * sizeof(double);
      rput_parameters.rdma.local.mr      = mr_info[origin_task_id].memregion;
      rput_parameters.rdma.local.offset  = 0;
      rput_parameters.rdma.remote.mr     = mr_info[target_task_id].memregion;
      rput_parameters.rdma.remote.offset = 0;
      rput_parameters.put.rdone_fn       = decrement;
      rput_parameters.rma.cookie         = (void *) & rput_active;

      PAMI_Rput (context[0], & rput_parameters);

      while (rput_active > 0)
        PAMI_Context_advance (context[0], 1);
    }

    {
      volatile unsigned send_active = 0;
      accumulate_test_t test_id = ACCUMULATE_TEST_SCALAR_SUM;

      pami_send_t send_parameters = {0};
      PAMI_Endpoint_create (client, target_task_id, 1, & send_parameters.send.dest);
      send_parameters.send.dispatch        = ACCUMULATE_TEST_DISPATCH_ID;
      send_parameters.send.header.iov_len  = sizeof (accumulate_test_t);
      send_parameters.send.header.iov_base = (void *) & test_id;
      send_parameters.send.data.iov_base   = test_info.data_buffer.iov_base;
      send_parameters.send.data.iov_len    = test_info.data_buffer.iov_len;
      send_parameters.events.remote_fn     = decrement;
      send_parameters.events.cookie        = (void *) & send_active;

      for (test_id = ACCUMULATE_TEST_SCALAR_SUM; test_id < ACCUMULATE_TEST_COUNT; test_id++)
      {
        send_active = 1;

        fprintf (stdout, "(%03d) sending data buffer for accumulate test \"%s\"\n",
                 __LINE__, accumulate_test_name[test_id]);
        PAMI_Send (context[0], & send_parameters);

        fprintf (stdout, "(%03d) waiting for remote completion of data buffer sent for accumulate test \"%s\"\n",
                 __LINE__, accumulate_test_name[test_id]);

        while (send_active > 0)
          PAMI_Context_advance (context[0], 1);

        fprintf (stdout, "(%03d) data buffer received on remote for accumulate test \"%s\"\n",
                 __LINE__, accumulate_test_name[test_id]);
      }
    }

    {
      /*
       * Use rdma get to retrieve the remote buffer and compare results.
       */
      volatile unsigned rget_active = 1;

      pami_rget_simple_t rget_parameters = {0};
      PAMI_Endpoint_create (client, target_task_id, 1, & rget_parameters.rma.dest);
      rget_parameters.rma.done_fn        = decrement;
      rget_parameters.rma.cookie         = (void *) & rget_active;
      rget_parameters.rma.bytes          = sizeof(double) * num_doubles;
      rget_parameters.rdma.local.mr      = mr_info[origin_task_id].memregion;
      rget_parameters.rdma.local.offset  = 0;
      rget_parameters.rdma.remote.mr     = mr_info[target_task_id].memregion;
      rget_parameters.rdma.remote.offset = 0;

      PAMI_Rget (context[0], & rget_parameters);

      while (rget_active > 0)
        PAMI_Context_advance (context[0], 1);
    }
  }
  else
  {
    /*
     * All other tasks, if any, do nothing and simply enter the barrier.
     */
  }

  simple_barrier (client, context[0]);

#ifdef ASYNC_PROGRESS
  async_progress_disable (&async_progress, context[1]);
  async_progress_close (&async_progress);
#endif

  /*
   * Cleanup?
   */
  return;
}
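/*
 * 'decrement' is used throughout test_fn() as a local completion, remote
 * completion, and rdma done callback. Assumed shape (the cookies above are
 * 'volatile unsigned' counters primed to the number of pending events):
 */
static void decrement (pami_context_t context, void * cookie,
                       pami_result_t result)
{
  volatile unsigned * value = (volatile unsigned *) cookie;
  --(*value);
}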
int main (int argc, char ** argv)
{
  pami_client_t client;
  pami_context_t context;
  pami_configuration_t * configuration = NULL;
  char cl_string[] = "TEST";
  pami_result_t result = PAMI_ERROR;

  result = PAMI_Client_create (cl_string, &client, NULL, 0);
  if (result != PAMI_SUCCESS)
  {
    fprintf (stderr, "Error. Unable to initialize pami client. result = %d\n", result);
    return 1;
  }

  pami_task_t task = PAMIX_Client_task (client);
  size_t size = PAMIX_Client_size (client);

  result = PAMI_Context_createv (client, configuration, 0, &context, 1);
  if (result != PAMI_SUCCESS)
  {
    fprintf (stderr, "Error. Unable to create pami context. result = %d\n", result);
    return 1;
  }

  /* Attempt to send using a dispatch id that has not been registered. */
  char metadata[1024];
  char buffer[1024];

  volatile unsigned recv_active = 1;
  volatile unsigned send_active = 1;

  pami_send_t parameters;
  parameters.send.dispatch        = 10;
  parameters.send.header.iov_base = metadata;
  parameters.send.header.iov_len  = 8;
  parameters.send.data.iov_base   = buffer;
  parameters.send.data.iov_len    = 8;
  parameters.events.cookie        = (void *) & send_active;
  parameters.events.local_fn      = decrement;
  parameters.events.remote_fn     = NULL;
  PAMI_Endpoint_create (client, size - 1, 0, &parameters.send.dest);

  if (task == 0)
  {
    result = PAMI_Send (context, &parameters);
    if (result == PAMI_SUCCESS)
    {
      fprintf (stderr, "Test failure. Expected error when using an unregistered dispatch id.\n");
      return 1;
    }
  }

  size_t dispatch = 10;
  pami_dispatch_callback_function fn;
  fn.p2p = test_dispatch;
  pami_dispatch_hint_t options = {};
  result = PAMI_Dispatch_set (context, dispatch, fn, (void *) & recv_active, options);
  if (result != PAMI_SUCCESS)
  {
    fprintf (stderr, "Error. Unable to register pami dispatch. result = %d\n", result);
    return 1;
  }

  if (task == 0)
  {
    PAMI_Endpoint_create (client, size - 1, 0, &parameters.send.dest);

    result = PAMI_Send (context, &parameters);
    if (result != PAMI_SUCCESS)
    {
      fprintf (stderr, "Error. Unable to send using a registered dispatch id.\n");
      return 1;
    }

    while (send_active)
    {
      result = PAMI_Context_advance (context, 1000);
      if ((result != PAMI_SUCCESS) && (result != PAMI_EAGAIN))
      {
        fprintf (stderr, "Error. Unable to advance pami context. result = %d\n", result);
        return 1;
      }
    }

    while (recv_active)
    {
      result = PAMI_Context_advance (context, 1000);
      if ((result != PAMI_SUCCESS) && (result != PAMI_EAGAIN))
      {
        fprintf (stderr, "Error. Unable to advance pami context. result = %d\n", result);
        return 1;
      }
    }
  }
  else if (task == (size - 1))
  {
    PAMI_Endpoint_create (client, 0, 0, &parameters.send.dest);

    while (recv_active)
    {
      result = PAMI_Context_advance (context, 1000);
      if ((result != PAMI_SUCCESS) && (result != PAMI_EAGAIN))
      {
        fprintf (stderr, "Error. Unable to advance pami context. result = %d\n", result);
        return 1;
      }
    }

    result = PAMI_Send (context, &parameters);
    if (result != PAMI_SUCCESS)
    {
      fprintf (stderr, "Error. Unable to send using a registered dispatch id.\n");
      return 1;
    }

    while (send_active)
    {
      result = PAMI_Context_advance (context, 1000);
      if ((result != PAMI_SUCCESS) && (result != PAMI_EAGAIN))
      {
        fprintf (stderr, "Error. Unable to advance pami context. result = %d\n", result);
        return 1;
      }
    }
  }

  result = PAMI_Context_destroyv(&context, 1);
  if (result != PAMI_SUCCESS)
  {
    fprintf (stderr, "Error. Unable to destroy pami context. result = %d\n", result);
    return 1;
  }

  result = PAMI_Client_destroy(&client);
  if (result != PAMI_SUCCESS)
  {
    fprintf (stderr, "Error. Unable to finalize pami client. result = %d\n", result);
    return 1;
  }

  return 0;
}
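/*
 * 'test_dispatch' above receives the 8-byte header and 8-byte payload sent
 * with PAMI_Send. A sketch of its assumed shape for a message small enough
 * to arrive immediately (data already addressable, no pami_recv_t fill-in
 * required); the real handler may also validate the payload:
 */
static void test_dispatch (pami_context_t context, void * cookie,
                           const void * header, size_t header_size,
                           const void * data, size_t data_size,
                           pami_endpoint_t origin, pami_recv_t * recv)
{
  volatile unsigned * active = (volatile unsigned *) cookie;
  --(*active);
}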
int main(int argc, char*argv[])
{
  pami_client_t client;
  pami_context_t *context;
  pami_task_t task_id, local_task_id = 0, task_zero = 0;
  size_t num_tasks;
  pami_geometry_t world_geometry;

  /* Barrier variables */
  size_t barrier_num_algorithm[2];
  pami_algorithm_t *bar_always_works_algo = NULL;
  pami_metadata_t *bar_always_works_md = NULL;
  pami_algorithm_t *bar_must_query_algo = NULL;
  pami_metadata_t *bar_must_query_md = NULL;
  pami_xfer_type_t barrier_xfer = PAMI_XFER_BARRIER;
  volatile unsigned bar_poll_flag = 0;
  volatile unsigned newbar_poll_flag = 0;

  /* Allreduce variables */
  size_t allreduce_num_algorithm[2];
  pami_algorithm_t *allreduce_always_works_algo = NULL;
  pami_metadata_t *allreduce_always_works_md = NULL;
  pami_algorithm_t *allreduce_must_query_algo = NULL;
  pami_metadata_t *allreduce_must_query_md = NULL;
  pami_xfer_type_t allreduce_xfer = PAMI_XFER_ALLREDUCE;
  volatile unsigned allreduce_poll_flag = 0;

  int nalg = 0;
  double ti, tf, usec;
  pami_xfer_t barrier;
  pami_xfer_t allreduce;

  /* Process environment variables and setup globals */
  setup_env();
  assert(gNum_contexts > 0);
  context = (pami_context_t*)malloc(sizeof(pami_context_t) * gNum_contexts);

  /* Allocate buffer(s) */
  int err = 0;
  void* sbuf = NULL;
  err = posix_memalign(&sbuf, 128, gMax_byte_count + gBuffer_offset);
  assert(err == 0);
  sbuf = (char*)sbuf + gBuffer_offset;

  void* rbuf = NULL;
  err = posix_memalign(&rbuf, 128, gMax_byte_count + gBuffer_offset);
  assert(err == 0);
  rbuf = (char*)rbuf + gBuffer_offset;

  /* Initialize PAMI */
  int rc = pami_init(&client,        /* Client             */
                     context,        /* Context            */
                     NULL,           /* Clientname=default */
                     &gNum_contexts, /* gNum_contexts      */
                     NULL,           /* null configuration */
                     0,              /* no configuration   */
                     &task_id,       /* task id            */
                     &num_tasks);    /* number of tasks    */
  if (rc == 1)
    return 1;

  if (num_tasks == 1)
  {
    fprintf(stderr, "No subcomms on 1 node\n");
    return 0;
  }

  assert(task_id >= 0);
  assert(task_id < num_tasks);

  /* Query the world geometry for barrier algorithms */
  rc |= query_geometry_world(client, context[0], &world_geometry, barrier_xfer,
                             barrier_num_algorithm, &bar_always_works_algo,
                             &bar_always_works_md, &bar_must_query_algo,
                             &bar_must_query_md);
  if (rc == 1)
    return 1;

  /* Set up world barrier */
  barrier.cb_done   = cb_done;
  barrier.cookie    = (void*) &bar_poll_flag;
  barrier.algorithm = bar_always_works_algo[0];

  unsigned iContext = 0;

  /* Create the subgeometry */
  pami_geometry_range_t *range;
  int rangecount;
  pami_geometry_t newgeometry;
  size_t newbar_num_algo[2];
  pami_algorithm_t *newbar_algo = NULL;
  pami_metadata_t *newbar_md = NULL;
  pami_algorithm_t *q_newbar_algo = NULL;
  pami_metadata_t *q_newbar_md = NULL;
  pami_xfer_t newbarrier;
  size_t set[2];
  int id;

  range = (pami_geometry_range_t *)malloc(((num_tasks + 1) / 2) * sizeof(pami_geometry_range_t));

  int unused_non_task_zero[2];
  get_split_method(&num_tasks, task_id, &rangecount, range, &local_task_id,
                   set, &id, &task_zero, unused_non_task_zero);

  for (; iContext < gNum_contexts; ++iContext)
  {
    if (task_id == task_zero)
      printf("# Context: %u\n", iContext);

    /* Delay the task_zero task and emulate that it is doing "other" message
       passing. This causes the geometry_create requests from other nodes to
       arrive unexpected when creating parentless geometries; parented
       geometries are unaffected. */
    if (task_id == task_zero)
    {
      delayTest(1);
      unsigned ii = 0;
      for (; ii < gNum_contexts; ++ii)
        PAMI_Context_advance (context[ii], 1000);
    }

    rc |= create_and_query_geometry(client, context[0], context[iContext],
                                    gParentless ? PAMI_GEOMETRY_NULL : world_geometry,
                                    &newgeometry, range, rangecount,
                                    id + iContext, /* Unique id for each context */
                                    barrier_xfer, newbar_num_algo,
                                    &newbar_algo, &newbar_md,
                                    &q_newbar_algo, &q_newbar_md);
    if (rc == 1)
      return 1;

    /* Query the sub geometry for allreduce algorithms */
    rc |= query_geometry(client, context[iContext], newgeometry, allreduce_xfer,
                         allreduce_num_algorithm, &allreduce_always_works_algo,
                         &allreduce_always_works_md, &allreduce_must_query_algo,
                         &allreduce_must_query_md);
    if (rc == 1)
      return 1;

    /* Set up sub geometry barrier */
    newbarrier.cb_done   = cb_done;
    newbarrier.cookie    = (void*) &newbar_poll_flag;
    newbarrier.algorithm = newbar_algo[0];

    for (nalg = 0; nalg < allreduce_num_algorithm[1]; nalg++)
    {
      metadata_result_t result = {0};
      int i, j, k;

      for (k = 1; k >= 0; k--)
      {
        if (set[k])
        {
          if (task_id == task_zero)
          {
            printf("# Allreduce Bandwidth Test(size:%zu) -- context = %d, task = %d, protocol: %s, Metadata: range %zu <-> %zd, mask %#X\n",
                   num_tasks, iContext, task_zero,
                   allreduce_must_query_md[nalg].name,
                   allreduce_must_query_md[nalg].range_lo,
                   (ssize_t)allreduce_must_query_md[nalg].range_hi,
                   allreduce_must_query_md[nalg].check_correct.bitmask_correct);
            printf("# Size(bytes)  iterations    bytes/sec      usec\n");
            printf("# -----------  -----------  -----------  ---------\n");
          }

          if (((strstr(allreduce_must_query_md[nalg].name, gSelected) == NULL) && gSelector) ||
              ((strstr(allreduce_must_query_md[nalg].name, gSelected) != NULL) && !gSelector))
            continue;

          gProtocolName = allreduce_must_query_md[nalg].name;

          unsigned checkrequired = allreduce_must_query_md[nalg].check_correct.values.checkrequired; /* must query every time */
          assert(!checkrequired || allreduce_must_query_md[nalg].check_fn); /* must have a check function if checkrequired */

          allreduce.cb_done   = cb_done;
          allreduce.cookie    = (void*) &allreduce_poll_flag;
          allreduce.algorithm = allreduce_must_query_algo[nalg];
          allreduce.cmd.xfer_allreduce.sndbuf     = sbuf;
          allreduce.cmd.xfer_allreduce.rcvbuf     = rbuf;
          allreduce.cmd.xfer_allreduce.rtype      = PAMI_TYPE_BYTE;
          allreduce.cmd.xfer_allreduce.rtypecount = 0;

          int op, dt;
          for (dt = 0; dt < dt_count; dt++)
            for (op = 0; op < op_count; op++)
            {
              if (gValidTable[op][dt])
              {
                if (task_id == task_zero)
                  printf("Running Allreduce: %s, %s\n", dt_array_str[dt], op_array_str[op]);

                for (i = MAX(1, gMin_byte_count / get_type_size(dt_array[dt]));
                     i <= gMax_byte_count / get_type_size(dt_array[dt]); i *= 2)
                {
                  size_t sz = get_type_size(dt_array[dt]);
                  size_t dataSent = i * sz;
                  int niter;

                  if (dataSent < CUTOFF)
                    niter = gNiterlat;
                  else
                    niter = NITERBW;

                  allreduce.cmd.xfer_allreduce.stypecount = i;
                  allreduce.cmd.xfer_allreduce.rtypecount = dataSent;
                  allreduce.cmd.xfer_allreduce.stype      = dt_array[dt];
                  allreduce.cmd.xfer_allreduce.op         = op_array[op];

                  result = check_metadata(allreduce_must_query_md[nalg],
                                          allreduce,
                                          dt_array[dt],
                                          dataSent, /* metadata uses bytes, not count i */
                                          allreduce.cmd.xfer_allreduce.sndbuf,
                                          PAMI_TYPE_BYTE,
                                          dataSent,
                                          allreduce.cmd.xfer_allreduce.rcvbuf);

                  if (allreduce_must_query_md[nalg].check_correct.values.nonlocal)
                  {
                    /* \note We currently ignore check_correct.values.nonlocal
                       because these tests should not have nonlocal differences
                       (so far). */
                    result.check.nonlocal = 0;
                  }

                  if (result.bitmask)
                    continue;

                  reduce_initialize_sndbuf (sbuf, i, op, dt, local_task_id, num_tasks);

                  blocking_coll(context[iContext], &newbarrier, &newbar_poll_flag);

                  ti = timer();
                  for (j = 0; j < niter; j++)
                  {
                    if (checkrequired) /* must query every time */
                    {
                      result = allreduce_must_query_md[nalg].check_fn(&allreduce);
                      if (result.bitmask)
                        continue;
                    }
                    blocking_coll(context[iContext], &allreduce, &allreduce_poll_flag);
                  }
                  tf = timer();
                  blocking_coll(context[iContext], &newbarrier, &newbar_poll_flag);

                  int rc_check;
                  rc |= rc_check = reduce_check_rcvbuf (rbuf, i, op, dt, local_task_id, num_tasks);
                  if (rc_check)
                    fprintf(stderr, "%s FAILED validation\n", gProtocolName);

                  usec = (tf - ti) / (double)niter;
                  if (task_id == task_zero)
                  {
                    printf("  %11lld %16d %14.1f %12.2f\n",
                           (long long)dataSent, niter,
                           (double)1e6 * (double)dataSent / (double)usec, usec);
                    fflush(stdout);
                  }
                }
              }
            }
        }
      }
    }

    /* We aren't testing the world barrier itself, so use context 0. */
    blocking_coll(context[0], &barrier, &bar_poll_flag);

    free(newbar_algo);
    free(newbar_md);
    free(q_newbar_algo);
    free(q_newbar_md);
    free(allreduce_always_works_algo);
    free(allreduce_always_works_md);
    free(allreduce_must_query_algo);
    free(allreduce_must_query_md);
  } /* for (iContext = 0; iContext < gNum_contexts; ++iContext) */

  free(bar_always_works_algo);
  free(bar_always_works_md);
  free(bar_must_query_algo);
  free(bar_must_query_md);

  sbuf = (char*)sbuf - gBuffer_offset;
  free(sbuf);
  rbuf = (char*)rbuf - gBuffer_offset;
  free(rbuf);

  rc |= pami_shutdown(&client, context, &gNum_contexts);
  return rc;
}
int main(int argc, char **argv)
{
    MPI_Init(&argc, &argv);

    optiq_pami_transport_init();
    struct optiq_pami_transport *pami_transport = optiq_pami_transport_get();
    int rank = pami_transport->rank;

    int local_rank = 1;
    int imm_bytes = 128;
    void *local_buf = malloc (imm_bytes);
    for (int i = 0; i < imm_bytes; i++) {
        ((char*)local_buf)[i] = i % 128;
    }

    int remote_rank = 3;
    int iters = 30;
    int cookie = iters;

    pami_dispatch_callback_function fn;
    pami_dispatch_hint_t options = {};

    /* Register the receive handler for the test dispatch id. */
    fn.p2p = optiq_recv_test_fn;
    pami_result_t result = PAMI_Dispatch_set (pami_transport->context,
                                              OPTIQ_TEST_PAMI_IMM, fn,
                                              (void *) &cookie, options);
    assert(result == PAMI_SUCCESS);
    if (result != PAMI_SUCCESS) {
        return 0;
    }

    for (int nbytes = 1; nbytes <= imm_bytes; nbytes++)
    {
        MPI_Barrier(MPI_COMM_WORLD);
        uint64_t t0 = GetTimeBase();

        /* Ping-pong: local_rank sends first, remote_rank echoes. */
        if (rank == local_rank) {
            for (int i = 0; i < iters; i++) {
                optiq_pami_send_immediate(pami_transport->context,
                                          OPTIQ_TEST_PAMI_IMM, NULL, 0,
                                          local_buf, nbytes,
                                          pami_transport->endpoints[remote_rank]);
            }
            while (cookie > 0) {
                PAMI_Context_advance (pami_transport->context, 100);
            }
        }

        if (rank == remote_rank) {
            while (cookie > 0) {
                PAMI_Context_advance (pami_transport->context, 100);
            }
            for (int i = 0; i < iters; i++) {
                optiq_pami_send_immediate(pami_transport->context,
                                          OPTIQ_TEST_PAMI_IMM, NULL, 0,
                                          local_buf, nbytes,
                                          pami_transport->endpoints[local_rank]);
            }
        }

        cookie = iters;
        uint64_t t1 = GetTimeBase();

        /* GetTimeBase() counts 1.6 GHz BG/Q cycles: cycles / 1.6e3 = usec. */
        double max_t, t = (double)(t1 - t0) / 1.6e3 / iters;
        MPI_Reduce(&t, &max_t, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD);

        if (pami_transport->rank == 0) {
            max_t = max_t / 2;  /* round trip -> one-way */
            double bw = (double)nbytes / 1024 / 1024 / max_t * 1e6;
            printf("nbytes = %d t = %8.4f(us) bw = %8.4f(MB/s)\n", nbytes, max_t, bw);
        }
    }

    free(local_buf);
    optiq_pami_transport_finalize();

    return 0;
}
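/*
 * Timing note for the loop above: GetTimeBase() returns raw cycles of the
 * 1.6 GHz BG/Q A2 core, so cycles / 1.6e3 yields microseconds. Small helpers
 * making the conversions explicit (sketch, not part of the test; assumes
 * <stdint.h>):
 */
static double bgq_cycles_to_usec (uint64_t cycles)
{
    return (double) cycles / 1.6e3;  /* 1600 cycles per microsecond */
}

static double one_way_mb_per_sec (int nbytes, double usec_one_way)
{
    return (double) nbytes / 1024.0 / 1024.0 / usec_one_way * 1e6;
}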
int main (int argc, char ** argv) { pami_client_t client; pami_context_t context[2]; pami_task_t task_id; size_t num_tasks = 0; size_t ncontexts = 0; size_t errors = 0; pami_result_t result = PAMI_ERROR; pami_type_t subtype; pami_type_t compound_type; pami_type_t simple_type; info_t exchange[MAX_TASKS]; double data[BUFFERSIZE]; volatile unsigned ready; { /* init */ ready = 0; unsigned i; for (i = 0; i < MAX_TASKS; i++) exchange[i].active = 0; for (i = 0; i < BUFFERSIZE; i++) data[i] = E; result = PAMI_Client_create ("TEST", &client, NULL, 0); if (result != PAMI_SUCCESS) { fprintf (stderr, "Error. Unable to create pami client. result = %d\n", result); return 1; } pami_configuration_t configuration; configuration.name = PAMI_CLIENT_TASK_ID; result = PAMI_Client_query(client, &configuration, 1); if (result != PAMI_SUCCESS) { fprintf (stderr, "Error. Unable query configuration (%d). result = %d\n", configuration.name, result); return 1; } task_id = configuration.value.intval; /*fprintf (stderr, "My task id = %d\n", task_id);*/ configuration.name = PAMI_CLIENT_NUM_TASKS; result = PAMI_Client_query(client, &configuration, 1); if (result != PAMI_SUCCESS) { fprintf (stderr, "Error. Unable query configuration (%d). result = %d\n", configuration.name, result); return 1; } num_tasks = configuration.value.intval; /*if (task_id == 0) fprintf (stderr, "Number of tasks = %zu\n", num_tasks);*/ if ((num_tasks < 2) || (num_tasks > MAX_TASKS)) { fprintf (stderr, "Error. This test requires 2-%d tasks. Number of tasks in this job: %zu\n", MAX_TASKS, num_tasks); return 1; } if (task_id == num_tasks - 1) { for (i = 0; i < 320; i++) data[i] = PI; } configuration.name = PAMI_CLIENT_NUM_CONTEXTS; result = PAMI_Client_query(client, &configuration, 1); if (result != PAMI_SUCCESS) { fprintf (stderr, "Error. Unable query configuration (%d). result = %d\n", configuration.name, result); return 1; } ncontexts = (configuration.value.intval < 2) ? 1 : 2; /*if (task_id == 0) fprintf (stderr, "maximum contexts = %zu, number of contexts used in this test = %zu\n", configuration.value.intval, ncontexts);*/ result = PAMI_Context_createv(client, NULL, 0, context, ncontexts); if (result != PAMI_SUCCESS) { fprintf (stderr, "Error. Unable to create pami context(s). result = %d\n", result); return 1; } pami_dispatch_hint_t options = {}; for (i = 0; i < ncontexts; i++) { pami_dispatch_callback_function fn; fn.p2p = dispatch_exchange; result = PAMI_Dispatch_set (context[i], EXCHANGE_DISPATCH_ID, fn, (void *) exchange, options); if (result != PAMI_SUCCESS) { fprintf (stderr, "Error. Unable register pami 'exchange' dispatch. result = %d\n", result); return 1; } fn.p2p = dispatch_notify; result = PAMI_Dispatch_set (context[i], NOTIFY_DISPATCH_ID, fn, (void *) & ready, options); if (result != PAMI_SUCCESS) { fprintf (stderr, "Error. Unable register pami 'notify' dispatch. result = %d\n", result); return 1; } } /* *********************************** * Create the pami types ************************************/ /* This compound noncontiguous type is composed of one double, skips a double, * two doubles, skips a double, three doubles, skips a double, five doubles, * skips a double, six doubles, skips a double, seven doubles, skips a double, * eight doubles, then skips two doubles. * * This results in a type with 32 doubles that is 40 doubles * 'wide'. 
*/ PAMI_Type_create (&subtype); PAMI_Type_add_simple (subtype, sizeof(double), /* bytes */ 0, /* offset */ 1, /* count */ sizeof(double) * 2); /* stride */ PAMI_Type_add_simple (subtype, sizeof(double) * 2, /* bytes */ 0, /* offset */ 1, /* count */ sizeof(double) * 3); /* stride */ PAMI_Type_add_simple (subtype, sizeof(double) * 3, /* bytes */ 0, /* offset */ 1, /* count */ sizeof(double) * 4); /* stride */ PAMI_Type_add_simple (subtype, sizeof(double) * 5, /* bytes */ 0, /* offset */ 1, /* count */ sizeof(double) * 6); /* stride */ PAMI_Type_add_simple (subtype, sizeof(double) * 6, /* bytes */ 0, /* offset */ 1, /* count */ sizeof(double) * 7); /* stride */ PAMI_Type_add_simple (subtype, sizeof(double) * 7, /* bytes */ 0, /* offset */ 1, /* count */ sizeof(double) * 8);/* stride */ PAMI_Type_add_simple (subtype, sizeof(double) * 8, /* bytes */ 0, /* offset */ 1, /* count */ sizeof(double) * 10);/* stride */ PAMI_Type_complete (subtype, sizeof(double)); /* This noncontiguous type is composed of the above compound type, repeated * ten times with no stride. * * This results in a type with 320 doubles that is 400 doubles * 'wide'. */ PAMI_Type_create (&compound_type); PAMI_Type_add_typed (compound_type, subtype, /* subtype */ 0, /* offset */ 10, /* count */ sizeof(double) * 32); /* stride */ PAMI_Type_complete (compound_type, sizeof(double)); /* This simple noncontiguous type is composed of eight contiguous doubles, * then skips a _single_ double, repeated 40 times. * * This results in a type with 320 doubles that is 360 doubles 'wide'. */ PAMI_Type_create (&simple_type); PAMI_Type_add_simple (simple_type, sizeof(double) * 8, /* bytes */ 0, /* offset */ 40, /* count */ sizeof(double) * 9); /* stride */ PAMI_Type_complete (simple_type, sizeof(double)); /* Create a memory region for the local data buffer. */ size_t bytes; result = PAMI_Memregion_create (context[0], (void *) data, BUFFERSIZE, &bytes, &exchange[task_id].mr); if (result != PAMI_SUCCESS) { fprintf (stderr, "Error. Unable to create memory region. result = %d\n", result); return 1; } else if (bytes < BUFFERSIZE) { fprintf (stderr, "Error. Unable to create memory region of a large enough size. result = %d\n", result); return 1; } /* Broadcast the memory region to all tasks - including self. */ for (i = 0; i < num_tasks; i++) { pami_send_immediate_t parameters; parameters.dispatch = EXCHANGE_DISPATCH_ID; parameters.header.iov_base = (void *) & bytes; parameters.header.iov_len = sizeof(size_t); parameters.data.iov_base = (void *) & exchange[task_id].mr; parameters.data.iov_len = sizeof(pami_memregion_t); PAMI_Endpoint_create (client, i, 0, ¶meters.dest); result = PAMI_Send_immediate (context[0], ¶meters); } /* Advance until all memory regions have been received. */ for (i = 0; i < num_tasks; i++) { while (exchange[i].active == 0) PAMI_Context_advance (context[0], 100); } } /* init done */ pami_send_immediate_t notify; notify.dispatch = NOTIFY_DISPATCH_ID; notify.header.iov_base = NULL; notify.header.iov_len = 0; notify.data.iov_base = NULL; notify.data.iov_len = 0; volatile size_t active = 1; pami_rget_typed_t parameters; parameters.rma.hints = (pami_send_hint_t) {0}; parameters.rma.cookie = (void *) & active; parameters.rma.done_fn = decrement; parameters.rma.bytes = 320 * sizeof(double); if (task_id == 0) { fprintf (stdout, "PAMI_Rget('typed') functional test %s\n", (ncontexts < 2) ? 
"" : "[crosstalk]"); fprintf (stdout, "\n"); parameters.rdma.local.mr = &exchange[0].mr; parameters.rdma.remote.mr = &exchange[num_tasks - 1].mr; PAMI_Endpoint_create (client, num_tasks - 1, ncontexts - 1, ¶meters.rma.dest); PAMI_Endpoint_create (client, num_tasks - 1, ncontexts - 1, ¬ify.dest); } else { parameters.rdma.local.mr = &exchange[num_tasks - 1].mr; parameters.rdma.remote.mr = &exchange[0].mr; PAMI_Endpoint_create (client, 0, 0, ¶meters.rma.dest); PAMI_Endpoint_create (client, 0, 0, ¬ify.dest); } /* ******************************************************************** */ /* contiguous -> contiguous transfer test */ /* ******************************************************************** */ if (task_id == 0) { parameters.rdma.local.offset = 0; parameters.rdma.remote.offset = 0; parameters.type.local = PAMI_TYPE_DOUBLE; parameters.type.remote = PAMI_TYPE_DOUBLE; active = 1; PAMI_Rget_typed (context[0], ¶meters); while (active > 0) PAMI_Context_advance (context[0], 100); /* Notify the remote task that the data has been transfered. */ PAMI_Send_immediate (context[0], ¬ify); } else if (task_id == num_tasks - 1) { /* Wait for notification that the data has been transfered. */ while (ready == 0) PAMI_Context_advance (context[ncontexts - 1], 100); ready = 0; } /* ******************************************************************** */ /* contiguous -> non-contiguous transfer test */ /* ******************************************************************** */ if (task_id == num_tasks - 1) { parameters.rdma.local.offset = 4 * 1024; parameters.rdma.remote.offset = 0; parameters.type.local = simple_type; parameters.type.remote = PAMI_TYPE_DOUBLE; active = 1; PAMI_Rget_typed (context[ncontexts - 1], ¶meters); while (active > 0) PAMI_Context_advance (context[ncontexts - 1], 100); /* Notify the remote task that the data has been transfered. */ PAMI_Send_immediate (context[ncontexts - 1], ¬ify); } else if (task_id == 0) { /* Wait for notification that the data has been transfered. */ while (ready == 0) PAMI_Context_advance (context[0], 100); ready = 0; } /* ******************************************************************** */ /* non-contiguous -> non-contiguous transfer test */ /* ******************************************************************** */ if (task_id == 0) { parameters.rdma.local.offset = 4 * 1024; parameters.rdma.remote.offset = 4 * 1024; parameters.type.local = compound_type; parameters.type.remote = simple_type; active = 1; PAMI_Rget_typed (context[0], ¶meters); while (active > 0) PAMI_Context_advance (context[0], 100); /* Notify the remote task that the data has been transfered. */ PAMI_Send_immediate (context[0], ¬ify); } else if (task_id == num_tasks - 1) { /* Wait for notification that the data has been transfered. */ while (ready == 0) PAMI_Context_advance (context[ncontexts - 1], 100); ready = 0; } /* ******************************************************************** */ /* non-contiguous -> contiguous transfer test */ /* ******************************************************************** */ if (task_id == num_tasks - 1) { parameters.rdma.local.offset = 8 * 1024; parameters.rdma.remote.offset = 4 * 1024; parameters.type.local = PAMI_TYPE_DOUBLE; parameters.type.remote = compound_type; active = 1; PAMI_Rget_typed (context[ncontexts - 1], ¶meters); while (active > 0) PAMI_Context_advance (context[ncontexts - 1], 100); /* Notify the remote task that the data has been transfered. 
*/ PAMI_Send_immediate (context[ncontexts - 1], &notify); } else if (task_id == 0) { /* Wait for notification that the data has been transferred. */ while (ready == 0) PAMI_Context_advance (context[0], 100); ready = 0; } /* ******************************************************************** */ /* VERIFY data buffers */ /* ******************************************************************** */ /* Only task (num_tasks - 1) ends up holding the full expected pattern, so it performs the verification. */ if (task_id == num_tasks - 1) { unsigned i = 0; for (; i < 320; i++) { if (data[i] != PI) { errors++; fprintf (stderr, "Error. data[%d] != %g ..... (%g)\n", i, PI, data[i]); } } for (; i < 512; i++) { if (data[i] != E) { errors++; fprintf (stderr, "Error. data[%d] != %g ..... (%g)\n", i, E, data[i]); } } unsigned j = 0; for (; j < 40; j++) { unsigned n = 0; for (; n < 8; n++) { if (data[i] != PI) { errors++; fprintf (stderr, "Error. data[%d] != %g ..... (%g)\n", i, PI, data[i]); } i++; } if (data[i] != E) { errors++; fprintf (stderr, "Error. data[%d] != %g ..... (%g)\n", i, E, data[i]); } i++; } for (; i < 1024; i++) { if (data[i] != E) { errors++; fprintf (stderr, "Error. data[%d] != %g ..... (%g)\n", i, E, data[i]); } } for (; i < 1024 + 320; i++) { if (data[i] != PI) { errors++; fprintf (stderr, "Error. data[%d] != %g ..... (%g)\n", i, PI, data[i]); } } for (; i < BUFFERSIZE; i++) { if (data[i] != E) { errors++; fprintf (stderr, "Error. data[%d] != %g ..... (%g)\n", i, E, data[i]); } } } { /* cleanup */ result = PAMI_Context_destroyv(context, ncontexts); if (result != PAMI_SUCCESS) { fprintf (stderr, "Error. Unable to destroy pami context. result = %d\n", result); return 1; } result = PAMI_Client_destroy(&client); if (result != PAMI_SUCCESS) { fprintf (stderr, "Error. Unable to destroy pami client. result = %d\n", result); return 1; } if (task_id == num_tasks - 1) { if (errors) fprintf (stdout, "Test completed with errors (%zu)\n", errors); else fprintf (stdout, "Test completed with success\n"); } } /* cleanup done */ return (errors != 0); }
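/* [Editor's aside] A minimal, self-contained sketch (not part of the original
 * test; names here are illustrative) that spells out the layout arithmetic
 * claimed in the type-creation comments above: each PAMI_Type_add_simple call
 * contributes its 'bytes' to the data size and its 'stride' to the extent. */
#include <assert.h>
#include <stddef.h>
static void check_rget_typed_layouts (void)
{
  const size_t data_doubles[]   = { 1, 2, 3, 5, 6, 7, 8 };  /* doubles copied per add_simple  */
  const size_t stride_doubles[] = { 2, 3, 4, 6, 7, 8, 10 }; /* doubles spanned per add_simple */
  size_t data = 0, extent = 0, i;
  for (i = 0; i < sizeof(data_doubles) / sizeof(data_doubles[0]); i++)
    {
      data   += data_doubles[i];
      extent += stride_doubles[i];
    }
  assert (data == 32 && extent == 40);              /* subtype: 32 doubles, 40 'wide'        */
  assert (data * 10 == 320 && extent * 10 == 400);  /* compound type: ten repetitions        */
  assert (8 * 40 == 320 && 9 * 40 == 360);          /* simple type: 8-of-9 doubles, 40 times */
}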
int main (int argc, char ** argv) { pami_client_t client; pami_context_t context; pami_task_t task; size_t size; pami_dispatch_callback_function fn; pami_dispatch_hint_t options = {}; /* zero-initialize so only recv_immediate is ever set */ pami_result_t result = PAMI_ERROR; /* ====== INITIALIZE ====== */ result = PAMI_Client_create ("TEST", &client, NULL, 0); if (result != PAMI_SUCCESS) { fprintf (stderr, "Error. Unable to initialize pami client. result = %d\n", result); return 1; } task = client_task (client); size = client_size (client); result = PAMI_Context_createv (client, NULL, 0, &context, 1); if (result != PAMI_SUCCESS) { fprintf (stderr, "Error. Unable to create pami context. result = %d\n", result); return 1; } fn.p2p = dispatch_fn; options.recv_immediate = PAMI_HINT_DEFAULT; result = PAMI_Dispatch_set (context, DISPATCH_ID_DEFAULT_EXPECT_IMMEDIATE, fn, (void *) EXPECT_IMMEDIATE, options); if (result != PAMI_SUCCESS) { fprintf (stderr, "Error. Unable to register DISPATCH_ID_DEFAULT_EXPECT_IMMEDIATE. result = %d\n", result); return 1; } options.recv_immediate = PAMI_HINT_DEFAULT; result = PAMI_Dispatch_set (context, DISPATCH_ID_DEFAULT_EXPECT_ASYNC, fn, (void *) EXPECT_ASYNC, options); if (result != PAMI_SUCCESS) { fprintf (stderr, "Error. Unable to register DISPATCH_ID_DEFAULT_EXPECT_ASYNC. result = %d\n", result); return 1; } options.recv_immediate = PAMI_HINT_ENABLE; result = PAMI_Dispatch_set (context, DISPATCH_ID_ENABLE, fn, (void *) EXPECT_IMMEDIATE, options); if (result != PAMI_SUCCESS) { fprintf (stderr, "Error. Unable to register DISPATCH_ID_ENABLE. result = %d\n", result); return 1; } options.recv_immediate = PAMI_HINT_DISABLE; result = PAMI_Dispatch_set (context, DISPATCH_ID_DISABLE, fn, (void *) EXPECT_ASYNC, options); if (result != PAMI_SUCCESS) { fprintf (stderr, "Error. Unable to register DISPATCH_ID_DISABLE. result = %d\n", result); return 1; } /* ====== START TEST ====== */ __test_errors = 0; __test_recvs = 0; size_t test_count = 0; volatile size_t send_active = 0; pami_send_t parameters; parameters.send.header.iov_base = __junk; parameters.send.header.iov_len = 0; parameters.send.data.iov_base = __junk; parameters.send.data.iov_len = 0; parameters.send.dispatch = 0; parameters.events.cookie = (void *) & send_active; parameters.events.local_fn = decrement; parameters.events.remote_fn = NULL; result = PAMI_Endpoint_create (client, 0, 0, &parameters.send.dest); if (result != PAMI_SUCCESS) { fprintf (stderr, "Error: PAMI_Endpoint_create() failed for task 0, context 0 with %d.\n", result); return 1; } /* =================================================================== * 'recv_immediate' default * * (header+data) > recv_immediate_max MUST be an asynchronous receive * * A zero-byte send will \b always result in an immediate receive.
* \see pami_dispatch_p2p_function * * Data sizes to test: * - recv_immediate_max + 1 * - 0 */ parameters.send.data.iov_len = recv_immediate_max (context, DISPATCH_ID_DEFAULT_EXPECT_ASYNC) + 1; parameters.send.dispatch = DISPATCH_ID_DEFAULT_EXPECT_ASYNC; test_count++; if (task == 1) { send_active++; result = PAMI_Send (context, &parameters); if (result != PAMI_SUCCESS) { fprintf (stderr, "Error: Unable to send to 0x%08x using dispatch %zu with %d.\n", parameters.send.dest, parameters.send.dispatch, result); return 1; } } parameters.send.data.iov_len = 0; parameters.send.dispatch = DISPATCH_ID_DEFAULT_EXPECT_IMMEDIATE; test_count++; if (task == 1) { send_active++; result = PAMI_Send (context, &parameters); if (result != PAMI_SUCCESS) { fprintf (stderr, "Error: Unable to send to 0x%08x using dispatch %zu with %d.\n", parameters.send.dest, parameters.send.dispatch, result); return 1; } } /* =================================================================== * 'recv_immediate' enabled * * All receives are 'immediate'. (header+data) > recv_immediate_max is * invalid, but may not necessarily return an error. * * Data sizes to test: * - 0 * - recv_immediate_max * - recv_immediate_max + 1 ...... ? */ parameters.send.data.iov_len = 0; parameters.send.dispatch = DISPATCH_ID_ENABLE; test_count++; if (task == 1) { send_active++; result = PAMI_Send (context, &parameters); if (result != PAMI_SUCCESS) { fprintf (stderr, "Error: Unable to send to 0x%08x using dispatch %zu with %d.\n", parameters.send.dest, parameters.send.dispatch, result); return 1; } } parameters.send.data.iov_len = recv_immediate_max (context, DISPATCH_ID_DEFAULT_EXPECT_ASYNC); parameters.send.dispatch = DISPATCH_ID_ENABLE; test_count++; if (task == 1) { send_active++; result = PAMI_Send (context, &parameters); if (result != PAMI_SUCCESS) { fprintf (stderr, "Error: Unable to send to 0x%08x using dispatch %zu with %d.\n", parameters.send.dest, parameters.send.dispatch, result); return 1; } }
#if 0
parameters.send.data.iov_len = recv_immediate_max (context, DISPATCH_ID_DEFAULT_EXPECT_ASYNC) + 1; parameters.send.dispatch = DISPATCH_ID_ENABLE; test_count++; if (task == 1) { send_active++; result = PAMI_Send (context, &parameters); if (result != PAMI_SUCCESS) { fprintf (stderr, "Error: Unable to send to 0x%08x using dispatch %zu with %d.\n", parameters.send.dest, parameters.send.dispatch, result); return 1; } }
#endif
/* =================================================================== * 'recv_immediate' disabled * * All receives are NOT 'immediate' - even "zero byte data" * * Data sizes to test: * - 0 * - recv_immediate_max * - recv_immediate_max + 1 */ parameters.send.data.iov_len = 0; parameters.send.dispatch = DISPATCH_ID_DISABLE; test_count++; if (task == 1) { send_active++; result = PAMI_Send (context, &parameters); if (result != PAMI_SUCCESS) { fprintf (stderr, "Error: Unable to send to 0x%08x using dispatch %zu with %d.\n", parameters.send.dest, parameters.send.dispatch, result); return 1; } } parameters.send.data.iov_len = recv_immediate_max (context, DISPATCH_ID_DEFAULT_EXPECT_ASYNC); parameters.send.dispatch = DISPATCH_ID_DISABLE; test_count++; if (task == 1) { send_active++; result = PAMI_Send (context, &parameters); if (result != PAMI_SUCCESS) { fprintf (stderr, "Error: Unable to send to 0x%08x using dispatch %zu with %d.\n", parameters.send.dest, parameters.send.dispatch, result); return 1; } } parameters.send.data.iov_len = recv_immediate_max (context, DISPATCH_ID_DEFAULT_EXPECT_ASYNC) + 1; parameters.send.dispatch = DISPATCH_ID_DISABLE; test_count++;
if (task == 1) { send_active++; result = PAMI_Send (context, &parameters); if (result != PAMI_SUCCESS) { fprintf (stderr, "Error: Unable to send to 0x%08x using dispatch %zu with %d.\n", parameters.send.dest, parameters.send.dispatch, result); return 1; } } /* ====== WAIT FOR COMMUNICATION COMPLETION ====== */ if (task == 0) { while (__test_recvs < test_count) PAMI_Context_advance (context, 1000); } else if (task == 1) { while (send_active) PAMI_Context_advance (context, 1000); } /* ====== CLEANUP ====== */ result = PAMI_Context_destroyv (&context, 1); if (result != PAMI_SUCCESS) { fprintf (stderr, "Error. Unable to destroy context, result = %d\n", result); return 1; } result = PAMI_Client_destroy (&client); if (result != PAMI_SUCCESS) { fprintf (stderr, "Error. Unable to destroy pami client. result = %d\n", result); return 1; } /* ====== REPORT ERRORS ====== */ if (__test_errors > 0) { fprintf (stderr, "Error. Non-compliant PAMI receive immediate implementation! error count = %zu\n", __test_errors); return 1; } return 0; }
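/* [Editor's aside] dispatch_fn and recv_immediate_max() are defined outside
 * this excerpt. Under the PAMI p2p dispatch contract a NULL 'recv' argument
 * means the payload arrived immediately ('pipe_addr' is valid); a non-NULL
 * 'recv' must be filled in to complete an asynchronous receive. Plausible
 * reconstructions, consistent with how this test registers its
 * EXPECT_IMMEDIATE / EXPECT_ASYNC cookies, and with the attribute name
 * PAMI_DISPATCH_RECV_IMMEDIATE_MAX assumed from pami.h, might look like: */
static void example_dispatch_fn (pami_context_t context, void * cookie,
                                 const void * header_addr, size_t header_size,
                                 const void * pipe_addr, size_t data_size,
                                 pami_endpoint_t origin, pami_recv_t * recv)
{
  size_t expect = (size_t) cookie;          /* EXPECT_IMMEDIATE or EXPECT_ASYNC */
  if (recv == NULL)                         /* immediate delivery */
    {
      if (expect == EXPECT_ASYNC) __test_errors++;
    }
  else                                      /* asynchronous delivery */
    {
      if (expect == EXPECT_IMMEDIATE) __test_errors++;
      recv->cookie      = NULL;             /* discard the payload ...          */
      recv->local_fn    = NULL;
      recv->addr        = __junk;           /* ... assumes __junk is big enough */
      recv->type        = PAMI_TYPE_BYTE;
      recv->offset      = 0;
      recv->data_fn     = PAMI_DATA_COPY;
      recv->data_cookie = NULL;
    }
  __test_recvs++;
}
static size_t example_recv_immediate_max (pami_context_t context, size_t dispatch)
{
  pami_configuration_t config;
  config.name = PAMI_DISPATCH_RECV_IMMEDIATE_MAX;  /* assumed attribute name */
  if (PAMI_Dispatch_query (context, dispatch, &config, 1) != PAMI_SUCCESS)
    return 0;                               /* conservative fallback */
  return (size_t) config.value.intval;
}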
int main (int argc, char ** argv) { /*volatile size_t send_active = 2; */ volatile size_t send_active = 1; volatile size_t recv_active = 1; pami_client_t client; pami_context_t context; char cl_string[] = "TEST"; pami_result_t result = PAMI_ERROR; fprintf (stderr, "Before Client initialize\n"); result = PAMI_Client_create (cl_string, &client, NULL, 0); if (result != PAMI_SUCCESS) { fprintf (stderr, "Error. Unable to initialize pami client. result = %d\n", result); return 1; } fprintf (stderr, "After Client initialize\n"); fprintf (stderr, "before context createv\n"); { result = PAMI_Context_createv(client, NULL, 0, &context, 1); } if (result != PAMI_SUCCESS) { fprintf (stderr, "Error. Unable to create pami context. result = %d\n", result); return 1; } fprintf (stderr, "after context createv\n"); pami_configuration_t configuration; configuration.name = PAMI_CLIENT_TASK_ID; result = PAMI_Client_query(client, &configuration,1); if (result != PAMI_SUCCESS) { fprintf (stderr, "Error. Unable to query configuration (%d). result = %d\n", configuration.name, result); return 1; } size_t task_id = configuration.value.intval; fprintf (stderr, "My task id = %zu\n", task_id); configuration.name = PAMI_CLIENT_NUM_TASKS; result = PAMI_Client_query(client, &configuration,1); if (result != PAMI_SUCCESS) { fprintf (stderr, "Error. Unable to query configuration (%d). result = %d\n", configuration.name, result); return 1; } /*size_t num_tasks = configuration.value.intval; */ size_t dispatch = 0; pami_dispatch_callback_function fn; fn.p2p = test_dispatch; pami_dispatch_hint_t options={}; fprintf (stderr, "Before PAMI_Dispatch_set() .. &recv_active = %p, recv_active = %zu\n", &recv_active, recv_active); result = PAMI_Dispatch_set (context, dispatch, fn, (void *)&recv_active, options); if (result != PAMI_SUCCESS) { fprintf (stderr, "Error. Unable to register pami dispatch. result = %d\n", result); return 1; } pami_send_t parameters; parameters.send.dispatch = dispatch; parameters.send.header.iov_base = (void *)&dispatch; /* send *something* */ parameters.send.header.iov_len = sizeof(size_t); parameters.send.data.iov_base = (void *)&dispatch; /* send *something* */ parameters.send.data.iov_len = sizeof(size_t); parameters.events.cookie = (void *) &send_active; parameters.events.local_fn = send_done_local; /*parameters.events.remote_fn = send_done_remote; */ parameters.events.remote_fn = NULL;
#if 1
int iter; for (iter=0; iter < 100; iter++) { fprintf (stderr, "before send ...\n"); PAMI_Endpoint_create (client, task_id, 0, &parameters.send.dest); result = PAMI_Send (context, &parameters); fprintf (stderr, "... after send.\n"); fprintf (stderr, "before send-recv advance loop (send_active = %zu, recv_active = %zu) ...\n", send_active, recv_active); while (send_active || recv_active) { result = PAMI_Context_advance (context, 100); if ( (result != PAMI_SUCCESS) && (result != PAMI_EAGAIN) ) { fprintf (stderr, "Error. Unable to advance pami context. result = %d\n", result); return 1; } } fprintf (stderr, "... after send-recv advance loop\n"); send_active = recv_active = 1; }
#endif
result = PAMI_Context_destroyv(&context, 1); if (result != PAMI_SUCCESS) { fprintf (stderr, "Error. Unable to destroy pami context. result = %d\n", result); return 1; } result = PAMI_Client_destroy(&client); if (result != PAMI_SUCCESS) { fprintf (stderr, "Error. Unable to finalize pami client. result = %d\n", result); return 1; } return 0; }
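/* [Editor's aside] send_done_local and test_dispatch are defined outside this
 * excerpt. Minimal reconstructions consistent with the advance loop above
 * (the local completion clears send_active, the dispatch handler clears
 * recv_active; an 8-byte header plus 8-byte payload is small enough to arrive
 * on the immediate path, so 'recv' is NULL here) would be: */
static void send_done_local (pami_context_t context, void * cookie, pami_result_t result)
{
  volatile size_t * active = (volatile size_t *) cookie;
  (*active)--;                /* releases the send half of the advance loop */
}
static void test_dispatch (pami_context_t context, void * cookie,
                           const void * header_addr, size_t header_size,
                           const void * pipe_addr, size_t data_size,
                           pami_endpoint_t origin, pami_recv_t * recv)
{
  volatile size_t * active = (volatile size_t *) cookie;
  (*active)--;                /* releases the receive half of the advance loop */
}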
int main(int argc, char*argv[]) { pami_client_t client; pami_context_t *context; pami_geometry_t world_geometry; pami_task_t root_task = 0; /* Barrier variables */ size_t barrier_num_algorithm[2]; pami_algorithm_t *bar_always_works_algo = NULL; pami_metadata_t *bar_always_works_md = NULL; pami_algorithm_t *bar_must_query_algo = NULL; pami_metadata_t *bar_must_query_md = NULL; pami_xfer_type_t barrier_xfer = PAMI_XFER_BARRIER; pami_xfer_t barrier; volatile unsigned bar_poll_flag = 0; /* Amscatter variables */ size_t amscatter_num_algorithm[2]; pami_algorithm_t *amscatter_always_works_algo = NULL; pami_metadata_t *amscatter_always_works_md = NULL; pami_algorithm_t *amscatter_must_query_algo = NULL; pami_metadata_t *amscatter_must_query_md = NULL; pami_xfer_type_t amscatter_xfer = PAMI_XFER_AMSCATTER; pami_xfer_t amscatter; volatile unsigned amscatter_total_count = 0; int nalg = 0, i; double ti, tf, usec; /* Process environment variables and setup globals */ setup_env(); assert(gNum_contexts > 0); context = (pami_context_t*)malloc(sizeof(pami_context_t) * gNum_contexts); /* \note Test environment variable TEST_ROOT=N, defaults to 0. */ char* sRoot = getenv("TEST_ROOT"); /* Override ROOT */ if (sRoot) root_task = (pami_task_t) atoi(sRoot); /* Initialize PAMI */ int rc = pami_init(&client, /* Client */ context, /* Context */ NULL, /* Clientname=default */ &gNum_contexts, /* gNum_contexts */ NULL, /* null configuration */ 0, /* no configuration */ &my_task_id, /* task id */ &num_tasks); /* number of tasks */ if (rc == 1) return 1; if (gNumRoots > num_tasks) gNumRoots = num_tasks; /* Allocate buffer(s) */ int err = 0; void *sbuf = NULL; err = posix_memalign(&sbuf, 128, (gMax_byte_count * num_tasks) + gBuffer_offset); assert(err == 0); sbuf = (char*)sbuf + gBuffer_offset; void* rbuf = NULL; err = posix_memalign(&rbuf, 128, gMax_byte_count + gBuffer_offset); assert(err == 0); rbuf = (char*)rbuf + gBuffer_offset; void *headers = NULL; err = posix_memalign((void **)&headers, 128, (num_tasks * sizeof(user_header_t)) + gBuffer_offset); assert(err == 0); headers = (char*)headers + gBuffer_offset; void *validation = NULL; err = posix_memalign((void **)&validation, 128, (num_tasks * sizeof(validation_t)) + gBuffer_offset); assert(err == 0); validation = (char*)validation + gBuffer_offset; /* Initialize the headers */ for(i = 0; i < num_tasks; ++i) { ((user_header_t *)headers)[i].dst_rank = i; } unsigned iContext = 0; for (; iContext < gNum_contexts; ++iContext) { if (my_task_id == 0) printf("# Context: %u\n", iContext); /* Query the world geometry for barrier algorithms */ rc |= query_geometry_world(client, context[iContext], &world_geometry, barrier_xfer, barrier_num_algorithm, &bar_always_works_algo, &bar_always_works_md, &bar_must_query_algo, &bar_must_query_md); if (rc == 1) return 1; /* Query the world geometry for amscatter algorithms */ rc |= query_geometry_world(client, context[iContext], &world_geometry, amscatter_xfer, amscatter_num_algorithm, &amscatter_always_works_algo, &amscatter_always_works_md, &amscatter_must_query_algo, &amscatter_must_query_md); if (rc == 1) return 1; _g_recv_buffer = rbuf; _g_send_buffer = sbuf; _g_val_buffer = validation; barrier.cb_done = cb_done; barrier.cookie = (void*) & bar_poll_flag; barrier.algorithm = bar_always_works_algo[0]; blocking_coll(context[iContext], &barrier, &bar_poll_flag); amscatter.algorithm = amscatter_always_works_algo[0]; amscatter.cmd.xfer_amscatter.headers = headers; amscatter.cmd.xfer_amscatter.headerlen = sizeof(user_header_t); amscatter.cmd.xfer_amscatter.sndbuf = sbuf;
amscatter.cmd.xfer_amscatter.stype = PAMI_TYPE_BYTE; amscatter.cmd.xfer_amscatter.stypecount = 0; for (nalg = 0; nalg < amscatter_num_algorithm[0]; nalg++) { gProtocolName = amscatter_always_works_md[nalg].name; if (my_task_id == root_task) { printf("# AMScatter Bandwidth Test(size:%zu) -- context = %d, root = %d, protocol: %s\n",num_tasks, iContext, root_task, amscatter_always_works_md[nalg].name); printf("# Size(bytes) iterations bytes/sec usec\n"); printf("# ----------- ----------- ----------- ---------\n"); fflush(stdout); } if (((strstr(amscatter_always_works_md[nalg].name,gSelected) == NULL) && gSelector) || ((strstr(amscatter_always_works_md[nalg].name,gSelected) != NULL) && !gSelector)) continue; int j; pami_collective_hint_t h = {0}; pami_dispatch_callback_function fn; lgContext = context[iContext]; fn.amscatter = cb_amscatter_recv; PAMI_AMCollective_dispatch_set(context[iContext], amscatter_always_works_algo[nalg], root_task,/* Set the dispatch id, can be any arbitrary value */ fn, (void*) &amscatter_total_count, h); amscatter.cmd.xfer_amscatter.dispatch = root_task; amscatter.algorithm = amscatter_always_works_algo[nalg]; volatile unsigned *nscatter = &amscatter_total_count; for (i = gMin_byte_count; i <= gMax_byte_count; i *= 2) { size_t dataSent = i; int niter; pami_result_t result; if (dataSent < CUTOFF) niter = gNiterlat; else niter = NITERBW; *nscatter = 0; memset(rbuf, 0xFF, i); scatter_initialize_sndbuf (sbuf, i, num_tasks); blocking_coll(context[iContext], &barrier, &bar_poll_flag); ti = timer(); for (j = 0; j < niter; j++) { root_task = (root_task + num_tasks - 1) % num_tasks; if (my_task_id == root_task) { amscatter.cmd.xfer_amscatter.stypecount = i; result = PAMI_Collective(context[iContext], &amscatter); if (result != PAMI_SUCCESS) { fprintf (stderr, "Error. Unable to issue collective. result = %d\n", result); return 1; } } while (*nscatter <= j) result = PAMI_Context_advance (context[iContext], 1); rc |= _gRc; /* validation return code done in cb_amscatter_done */ } assert(*nscatter == niter); tf = timer(); blocking_coll(context[iContext], &barrier, &bar_poll_flag); usec = (tf - ti) / (double)niter; if(my_task_id == root_task) { printf(" %11lld %16d %14.1f %12.2f\n", (long long)dataSent, niter, (double)1e6*(double)dataSent / (double)usec, usec); fflush(stdout); } } lgContext = NULL; } free(bar_always_works_algo); free(bar_always_works_md); free(bar_must_query_algo); free(bar_must_query_md); free(amscatter_always_works_algo); free(amscatter_always_works_md); free(amscatter_must_query_algo); free(amscatter_must_query_md); } /*for(unsigned iContext = 0; iContext < gNum_contexts; ++iContexts)*/ sbuf = (char*)sbuf - gBuffer_offset; free(sbuf); rbuf = (char*)rbuf - gBuffer_offset; free(rbuf); headers = (char*)headers - gBuffer_offset; free(headers); validation = (char*)validation - gBuffer_offset; free(validation); rc |= pami_shutdown(&client, context, &gNum_contexts); return rc; }
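/* [Editor's aside] blocking_coll() comes from the shared test harness and is
 * not shown in this excerpt. A plausible minimal version, matching how every
 * call site above pairs it with a volatile poll flag that the cb_done
 * callback is assumed to decrement, would be: */
#include <stdio.h>
static void example_blocking_coll (pami_context_t context, pami_xfer_t * coll,
                                   volatile unsigned * poll_flag)
{
  (*poll_flag)++;                     /* cb_done is assumed to decrement it */
  if (PAMI_Collective (context, coll) != PAMI_SUCCESS)
    fprintf (stderr, "Error. Unable to issue collective.\n");
  while (*poll_flag)                  /* spin until the collective completes */
    PAMI_Context_advance (context, 1);
}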
static void * allreduce_test(void* p) { thread_data_t *td = (thread_data_t*)p; pami_context_t myContext = (pami_context_t)td->context; /* Barrier variables */ size_t barrier_num_algorithm[2]; pami_algorithm_t *bar_always_works_algo = NULL; pami_metadata_t *bar_always_works_md = NULL; pami_algorithm_t *bar_must_query_algo = NULL; pami_metadata_t *bar_must_query_md = NULL; pami_xfer_type_t barrier_xfer = PAMI_XFER_BARRIER; volatile unsigned bar_poll_flag = 0; /* Allreduce variables */ size_t allreduce_num_algorithm[2]; pami_algorithm_t *allreduce_always_works_algo = NULL; pami_metadata_t *allreduce_always_works_md = NULL; pami_algorithm_t *allreduce_must_query_algo = NULL; pami_metadata_t *allreduce_must_query_md = NULL; pami_xfer_type_t allreduce_xfer = PAMI_XFER_ALLREDUCE; volatile unsigned allreduce_poll_flag = 0; int nalg= 0; double ti, tf, usec; pami_xfer_t barrier; pami_xfer_t allreduce; int rc = 0; /* Allocate buffer(s) */ int err = 0; void* sbuf = NULL; err = posix_memalign(&sbuf, 128, gMax_byte_count + gBuffer_offset); assert(err == 0); sbuf = (char*)sbuf + gBuffer_offset; void* rbuf = NULL; err = posix_memalign(&rbuf, 128, gMax_byte_count + gBuffer_offset); assert(err == 0); rbuf = (char*)rbuf + gBuffer_offset; /* Query the world geometry for barrier algorithms */ rc |= query_geometry(client, myContext, newgeometry, barrier_xfer, barrier_num_algorithm, &bar_always_works_algo, &bar_always_works_md, &bar_must_query_algo, &bar_must_query_md); /* Query the world geometry for allreduce algorithms */ rc |= query_geometry(client, myContext, newgeometry, allreduce_xfer, allreduce_num_algorithm, &allreduce_always_works_algo, &allreduce_always_works_md, &allreduce_must_query_algo, &allreduce_must_query_md); barrier.cb_done = cb_done; barrier.cookie = (void*) & bar_poll_flag; barrier.algorithm = bar_always_works_algo[0]; blocking_coll(myContext, &barrier, &bar_poll_flag); pami_endpoint_t my_ep, zero_ep; PAMI_Endpoint_create(client,task_id,td->tid,&my_ep); PAMI_Endpoint_create(client,0,0,&zero_ep); for (nalg = 0; nalg < allreduce_num_algorithm[0]; nalg++) { if (my_ep == zero_ep) { printf("# Allreduce Bandwidth Test(size:%zu) -- context = %d, protocol: %s\n",num_tasks, td->tid, allreduce_always_works_md[nalg].name); printf("# Size(bytes) iterations bytes/sec usec\n"); printf("# ----------- ----------- ----------- ---------\n"); } if (((strstr(allreduce_always_works_md[nalg].name, gSelected) == NULL) && gSelector) || ((strstr(allreduce_always_works_md[nalg].name, gSelected) != NULL) && !gSelector)) continue; gProtocolName = allreduce_always_works_md[nalg].name; allreduce.cb_done = cb_done; allreduce.cookie = (void*) & allreduce_poll_flag; allreduce.algorithm = allreduce_always_works_algo[nalg]; allreduce.cmd.xfer_allreduce.sndbuf = sbuf; allreduce.cmd.xfer_allreduce.rcvbuf = rbuf; allreduce.cmd.xfer_allreduce.rtype = PAMI_TYPE_BYTE; allreduce.cmd.xfer_allreduce.rtypecount = 0; int op, dt,i,j; for (dt = 0; dt < dt_count; dt++) { for (op = 0; op < op_count; op++) { if (gValidTable[op][dt]) { if (my_ep == zero_ep) printf("Running Allreduce: %s, %s\n", dt_array_str[dt], op_array_str[op]); for (i = MAX(1,gMin_byte_count/get_type_size(dt_array[dt])); i <= gMax_byte_count/get_type_size(dt_array[dt]); i *= 2) { size_t sz=get_type_size(dt_array[dt]); size_t dataSent = i * sz; int niter; if (dataSent < CUTOFF) niter = gNiterlat; else niter = NITERBW; allreduce.cmd.xfer_allreduce.stypecount = i; allreduce.cmd.xfer_allreduce.rtypecount = dataSent; allreduce.cmd.xfer_allreduce.stype = dt_array[dt]; 
allreduce.cmd.xfer_allreduce.op = op_array[op]; reduce_initialize_sndbuf (sbuf, i, op, dt, td->logical_rank, num_ep); blocking_coll(myContext, &barrier, &bar_poll_flag); ti = timer(); for (j = 0; j < niter; j++) { blocking_coll(myContext, &allreduce, &allreduce_poll_flag); } tf = timer(); /* This barrier isn't being timed; it just keeps tasks in step between sizes. */ blocking_coll(myContext, &barrier, &bar_poll_flag); int rc_check; rc_check = reduce_check_rcvbuf (rbuf, i, op, dt, td->logical_rank, num_ep); rc |= rc_check; if (rc_check) fprintf(stderr, "%s FAILED validation\n", gProtocolName); usec = (tf - ti) / (double)niter; if (my_ep == zero_ep) { printf(" %11lld %16d %14.1f %12.2f\n", (long long)dataSent, niter, (double)1e6*(double)dataSent / (double)usec, usec); fflush(stdout); } } } } } free(bar_always_works_algo); free(bar_always_works_md); free(bar_must_query_algo); free(bar_must_query_md); free(allreduce_always_works_algo); free(allreduce_always_works_md); free(allreduce_must_query_algo); free(allreduce_must_query_md); sbuf = (char*)sbuf - gBuffer_offset; free(sbuf); rbuf = (char*)rbuf - gBuffer_offset; free(rbuf); rc = PAMI_Fence_all (myContext, fence_cb_done, &fence_arrivals); while (fence_arrivals != 0) rc = PAMI_Context_advance (myContext, 1); pthread_exit(NULL); }
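/* [Editor's aside] The PAMI_Fence_all() epilogue above assumes fence_arrivals
 * is a counter pre-set to the number of expected completions and that
 * fence_cb_done decrements it; neither is defined in this excerpt. A minimal
 * callback consistent with the 'while (fence_arrivals != 0)' loop (counter
 * type assumed) would be: */
static void example_fence_cb_done (pami_context_t context, void * cookie, pami_result_t result)
{
  volatile unsigned * arrivals = (volatile unsigned *) cookie;
  (*arrivals)--;            /* one fewer outstanding fence completion */
}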
int main(int argc, char* argv[]) { pami_result_t result = PAMI_ERROR; if (Kernel_GetRank()==0) print_meminfo(stdout, "before PAMI_Client_create"); /* initialize the client */ char * clientname = ""; pami_client_t client; result = PAMI_Client_create( clientname, &client, NULL, 0 ); TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Client_create"); if (Kernel_GetRank()==0) print_meminfo(stdout, "after PAMI_Client_create"); /* query properties of the client */ pami_configuration_t config; size_t num_contexts; config.name = PAMI_CLIENT_TASK_ID; result = PAMI_Client_query( client, &config, 1); TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Client_query"); world_rank = config.value.intval; config.name = PAMI_CLIENT_NUM_TASKS; result = PAMI_Client_query( client, &config, 1); TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Client_query"); world_size = config.value.intval; if ( world_rank == 0 ) { printf("starting test on %ld ranks \n", world_size); fflush(stdout); } config.name = PAMI_CLIENT_PROCESSOR_NAME; result = PAMI_Client_query( client, &config, 1); assert(result == PAMI_SUCCESS); //printf("rank %ld is processor %s \n", world_rank, config.value.chararray); //fflush(stdout); config.name = PAMI_CLIENT_NUM_CONTEXTS; result = PAMI_Client_query( client, &config, 1); TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Client_query"); num_contexts = config.value.intval; /* initialize the contexts */ pami_context_t * contexts = NULL; contexts = (pami_context_t *) malloc( num_contexts * sizeof(pami_context_t) ); assert(contexts!=NULL); if (Kernel_GetRank()==0) fprintf(stdout, "num_contexts = %ld \n", (long)num_contexts); result = PAMI_Context_createv( client, &config, 0, contexts, num_contexts ); TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Context_createv"); if (Kernel_GetRank()==0) print_meminfo(stdout, "after PAMI_Context_createv"); /* setup the world geometry */ pami_geometry_t world_geometry; pami_xfer_type_t barrier_xfer = PAMI_XFER_BARRIER; size_t num_alg[2]; pami_algorithm_t * safe_barrier_algs = NULL; pami_metadata_t * safe_barrier_meta = NULL; pami_algorithm_t * fast_barrier_algs = NULL; pami_metadata_t * fast_barrier_meta = NULL; result = PAMI_Geometry_world( client, &world_geometry ); TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Geometry_world"); if (Kernel_GetRank()==0) print_meminfo(stdout, "after PAMI_Geometry_world"); result = PAMI_Geometry_algorithms_num( world_geometry, barrier_xfer, num_alg ); TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Geometry_algorithms_num"); if ( world_rank == 0 ) printf("number of barrier algorithms = {%ld,%ld} \n", num_alg[0], num_alg[1] ); if (Kernel_GetRank()==0) print_meminfo(stdout, "after PAMI_Geometry_algorithms_num"); safe_barrier_algs = (pami_algorithm_t *) malloc( num_alg[0] * sizeof(pami_algorithm_t) ); assert(safe_barrier_algs!=NULL); safe_barrier_meta = (pami_metadata_t *) malloc( num_alg[0] * sizeof(pami_metadata_t) ); assert(safe_barrier_meta!=NULL); fast_barrier_algs = (pami_algorithm_t *) malloc( num_alg[1] * sizeof(pami_algorithm_t) ); assert(fast_barrier_algs!=NULL); fast_barrier_meta = (pami_metadata_t *) malloc( num_alg[1] * sizeof(pami_metadata_t) ); assert(fast_barrier_meta!=NULL); result = PAMI_Geometry_algorithms_query( world_geometry, barrier_xfer, safe_barrier_algs, safe_barrier_meta, num_alg[0], fast_barrier_algs, fast_barrier_meta, num_alg[1] ); TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Geometry_algorithms_query"); if (Kernel_GetRank()==0) print_meminfo(stdout, "after PAMI_Geometry_algorithms_query"); /* perform a barrier */ size_t b; pami_xfer_t barrier; volatile int active 
= 0; for ( b = 0 ; b < num_alg[0] ; b++ ) { barrier.cb_done = cb_done; barrier.cookie = (void*) &active; barrier.algorithm = safe_barrier_algs[b]; uint64_t t0 = GetTimeBase(); active = 1; result = PAMI_Collective( contexts[0], &barrier ); TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Collective - barrier"); while (active) result = PAMI_Context_advance( contexts[0], 1 ); TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Context_advance - barrier"); uint64_t t1 = GetTimeBase(); if ( world_rank == 0 ) printf("safe barrier algorithm %ld (%s) - took %llu cycles \n", b, safe_barrier_meta[b].name, (long long unsigned int)t1-t0 ); fflush(stdout); } for ( b = 0 ; b < num_alg[1] ; b++ ) { barrier.cb_done = cb_done; barrier.cookie = (void*) &active; barrier.algorithm = fast_barrier_algs[b]; uint64_t t0 = GetTimeBase(); active = 1; result = PAMI_Collective( contexts[0], &barrier ); TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Collective - barrier"); while (active) result = PAMI_Context_advance( contexts[0], 1 ); TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Context_advance - barrier"); uint64_t t1 = GetTimeBase(); if ( world_rank == 0 ) printf("fast barrier algorithm %ld (%s) - took %llu cycles \n", b, fast_barrier_meta[b].name, (long long unsigned int)t1-t0 ); fflush(stdout); } if (Kernel_GetRank()==0) print_meminfo(stdout, "after barrier tests"); /* finalize the contexts */ result = PAMI_Context_destroyv( contexts, num_contexts ); TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Context_destroyv"); free(contexts); if (Kernel_GetRank()==0) print_meminfo(stdout, "before PAMI_Client_destroy"); /* finalize the client */ result = PAMI_Client_destroy( &client ); TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Client_destroy"); if (Kernel_GetRank()==0) print_meminfo(stdout, "after PAMI_Client_destroy"); if ( world_rank == 0 ) { printf("end of test \n"); fflush(stdout); } return 0; }
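/* [Editor's aside] GetTimeBase() returns raw time-base cycles, so the
 * "took %llu cycles" lines above report cycle counts, not wall time. On
 * Blue Gene/Q, where the Kernel_GetRank() calls suggest this test runs, the
 * A2 cores tick at 1.6 GHz, so a conversion helper (clock rate assumed)
 * could be: */
#include <stdint.h>
static double example_cycles_to_usec (uint64_t cycles)
{
  const double clock_hz = 1.6e9;  /* BG/Q core frequency; adjust elsewhere */
  return ((double) cycles / clock_hz) * 1.0e6;
}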
static void * gatherv_test(void* p) { thread_data_t *td = (thread_data_t*)p; pami_context_t myContext = (pami_context_t)td->context; /* Barrier variables */ size_t barrier_num_algorithm[2]; pami_algorithm_t *bar_always_works_algo = NULL; pami_metadata_t *bar_always_works_md = NULL; pami_algorithm_t *bar_must_query_algo = NULL; pami_metadata_t *bar_must_query_md = NULL; pami_xfer_type_t barrier_xfer = PAMI_XFER_BARRIER; volatile unsigned bar_poll_flag = 0; /* Gatherv variables */ size_t gatherv_num_algorithm[2]; pami_algorithm_t *gatherv_always_works_algo = NULL; pami_metadata_t *gatherv_always_works_md = NULL; pami_algorithm_t *gatherv_must_query_algo = NULL; pami_metadata_t *gatherv_must_query_md = NULL; pami_xfer_type_t gatherv_xfer = PAMI_XFER_GATHERV; volatile unsigned gatherv_poll_flag = 0; int nalg= 0; double ti, tf, usec; pami_xfer_t barrier; pami_xfer_t gatherv; int rc = 0; if(gNumRoots == -1) gNumRoots = num_ep; /* Allocate buffer(s) */ int err = 0; void* buf = NULL; err = posix_memalign(&buf, 128, (gMax_byte_count * num_ep) + gBuffer_offset); assert(err == 0); buf = (char*)buf + gBuffer_offset; void* rbuf = NULL; err = posix_memalign(&rbuf, 128, (gMax_byte_count * num_ep) + gBuffer_offset); assert(err == 0); rbuf = (char*)rbuf + gBuffer_offset; size_t *lengths = (size_t*)malloc(num_ep * sizeof(size_t)); assert(lengths); size_t *displs = (size_t*)malloc(num_ep * sizeof(size_t)); assert(displs); /* Query the world geometry for barrier algorithms */ rc |= query_geometry(client, myContext, newgeometry, barrier_xfer, barrier_num_algorithm, &bar_always_works_algo, &bar_always_works_md, &bar_must_query_algo, &bar_must_query_md); /* Query the world geometry for gatherv algorithms */ rc |= query_geometry(client, myContext, newgeometry, gatherv_xfer, gatherv_num_algorithm, &gatherv_always_works_algo, &gatherv_always_works_md, &gatherv_must_query_algo, &gatherv_must_query_md); barrier.cb_done = cb_done; barrier.cookie = (void*) & bar_poll_flag; barrier.algorithm = bar_always_works_algo[0]; blocking_coll(myContext, &barrier, &bar_poll_flag); pami_endpoint_t my_ep, zero_ep; PAMI_Endpoint_create(client,task_id,td->tid,&my_ep); PAMI_Endpoint_create(client,0,0,&zero_ep); for (nalg = 0; nalg < gatherv_num_algorithm[0]; nalg++) { gatherv.cb_done = cb_done; gatherv.cookie = (void*) & gatherv_poll_flag; gatherv.algorithm = gatherv_always_works_algo[nalg]; gatherv.cmd.xfer_gatherv.sndbuf = buf; gatherv.cmd.xfer_gatherv.stype = PAMI_TYPE_BYTE; gatherv.cmd.xfer_gatherv.stypecount = 0; gatherv.cmd.xfer_gatherv.rcvbuf = rbuf; gatherv.cmd.xfer_gatherv.rtype = PAMI_TYPE_BYTE; gatherv.cmd.xfer_gatherv.rtypecounts = lengths; gatherv.cmd.xfer_gatherv.rdispls = displs; gProtocolName = gatherv_always_works_md[nalg].name; if (my_ep == zero_ep) { printf("# Gatherv Bandwidth Test(size:%zu) -- context = %d, protocol: %s\n",num_tasks, td->tid, gProtocolName); printf("# Size(bytes) iterations bytes/sec usec\n"); printf("# ----------- ----------- ----------- ---------\n"); } if (((strstr(gatherv_always_works_md[nalg].name,gSelected) == NULL) && gSelector) || ((strstr(gatherv_always_works_md[nalg].name,gSelected) != NULL) && !gSelector)) continue; size_t i, j; for (i = gMin_byte_count; i <= gMax_byte_count; i *= 2) { size_t dataSent = i; int niter; size_t k = 0; for (k = 0; k < num_ep; k++) { lengths[k] = i; displs[k] = k * i; } lengths[k-1] = 0; if (dataSent < CUTOFF) niter = gNiterlat; else niter = NITERBW; blocking_coll(myContext, &barrier, &bar_poll_flag); ti = timer(); int ctxt_id = 0; pami_task_t root_task = 0; 
for (j = 0; j < niter; j++) { pami_endpoint_t root_ep; PAMI_Endpoint_create(client, root_task, ctxt_id, &root_ep); gatherv.cmd.xfer_gatherv.root = root_ep; gather_initialize_sndbuf(td->logical_rank, buf, i); if (root_ep == zero_ep) memset(rbuf, 0xFF, i*num_ep); if (td->logical_rank != num_ep - 1) gatherv.cmd.xfer_gatherv.stypecount = i; blocking_coll(myContext, &gatherv, &gatherv_poll_flag); if (my_ep == zero_ep) { int rc_check; rc |= rc_check = gather_check_rcvbuf(num_tasks-1, rbuf, i); if (rc_check) fprintf(stderr, "%s FAILED validation\n", gProtocolName); } ctxt_id = (ctxt_id + 1)%gNum_contexts; if(ctxt_id == 0) root_task = (root_task +1)%num_tasks; } tf = timer(); blocking_coll(myContext, &barrier, &bar_poll_flag); usec = (tf - ti) / (double)niter; if (my_ep == zero_ep) { printf(" %11lld %16d %14.1f %12.2f\n", (long long)dataSent, niter, (double)1e6*(double)dataSent / (double)usec, usec); fflush(stdout); } } } free(bar_always_works_algo); free(bar_always_works_md); free(bar_must_query_algo); free(bar_must_query_md); free(gatherv_always_works_algo); free(gatherv_always_works_md); free(gatherv_must_query_algo); free(gatherv_must_query_md); buf = (char*)buf - gBuffer_offset; free(buf); rbuf = (char*)rbuf - gBuffer_offset; free(rbuf); free(lengths); free(displs); rc = PAMI_Fence_all (myContext, fence_cb_done, &fence_arrivals); while (fence_arrivals != 0) rc = PAMI_Context_advance (myContext, 1); pthread_exit(NULL); }
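/* [Editor's aside] The lengths/displs vectors above are rebuilt for every
 * message size: each endpoint contributes i bytes at displacement k*i, except
 * the last endpoint, whose length is forced to zero; symmetrically, the
 * sender with logical_rank == num_ep - 1 leaves its stypecount at 0. A
 * gatherv is only well-formed when each sender's stypecount equals the
 * root's lengths[] entry for that endpoint. A small self-contained
 * illustration of that construction and its bounds invariant (names are
 * illustrative only): */
#include <assert.h>
#include <stddef.h>
static void example_fill_gatherv_vectors (size_t * lengths, size_t * displs,
                                          size_t num_ep, size_t bytes_per_ep)
{
  size_t k;
  for (k = 0; k < num_ep; k++)
    {
      lengths[k] = bytes_per_ep;     /* bytes expected from endpoint k */
      displs[k]  = k * bytes_per_ep; /* packed, non-overlapping placement */
    }
  lengths[num_ep - 1] = 0;           /* last endpoint deliberately sends nothing */
  for (k = 0; k < num_ep; k++)       /* every region stays inside rcvbuf */
    assert (displs[k] + lengths[k] <= num_ep * bytes_per_ep);
}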