Example No. 1
int main (int argc, char ** argv)
{
  pami_client_t client;
  pami_context_t context[128];
  pami_configuration_t configuration;
  char                  cl_string[] = "TEST";

  pami_result_t result = PAMI_ERROR;

  result = PAMI_Client_create (cl_string, &client, NULL, 0);
  if (result != PAMI_SUCCESS)
  {
    fprintf (stderr, "Error. Unable to initialize pami client. result = %d\n", result);
    return 1;
  }

  configuration.name = PAMI_CLIENT_NUM_CONTEXTS;
  result = PAMI_Client_query(client, &configuration, 1);
  size_t max = configuration.value.intval;

  size_t num = max;

  if (getenv("NUM_CONTEXTS")) {
	num = strtoul(getenv("NUM_CONTEXTS"), NULL, 0);
	if (num > 128) num = 128;
	/* if (num > max) num = max; */
  }
  fprintf (stderr, "PAMI_CLIENT_NUM_CONTEXTS = %zu, using %zu\n", max, num);
  size_t tmp = num;

  result = PAMI_Context_createv (client, &configuration, 0, context, num);
  if (result != PAMI_SUCCESS || num != tmp)
  {
    fprintf (stderr, "Error. Unable to create %zu pami context(s). result = %d\n", tmp, result);
    return 1;
  }

  result = PAMI_Context_destroyv (context, num);
  if (result != PAMI_SUCCESS)
  {
    fprintf (stderr, "Error. Unable to destroy the pami context(s). result = %d\n", result);
    return 1;
  }

  result = PAMI_Client_destroy(&client);
  if (result != PAMI_SUCCESS)
  {
    fprintf (stderr, "Error. Unable to finalize pami client. result = %d\n", result);
    return 1;
  }

  fprintf (stderr, "Success.\n");

  return 0;
}
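Each example in this listing is a bare main() lifted from a larger test program, so the #include lines are not shown. A minimal preamble that these snippets assume (standard C library headers plus the usual PAMI header) would be:

#include <pami.h>     /* pami_client_t, pami_context_t, PAMI_* functions */
#include <stdio.h>    /* fprintf */
#include <stdlib.h>   /* getenv, strtoul, malloc, free */
#include <string.h>   /* memset */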
Example No. 2
int main (int argc, char ** argv)
{
  pami_client_t  client[MAX_CLIENTS];
  pami_context_t context[MAX_CLIENTS];
  pami_result_t  result = PAMI_ERROR;
  pami_result_t  result_cl[MAX_CLIENTS] = {PAMI_ERROR, PAMI_ERROR};
  char           cl_string[] = "TEST";
  
  int max_clients=MAX_CLIENTS,i;
  for(i=0;i<max_clients;i++)
  {
    fprintf (stdout, "Creating Client %d of %d\n", i, max_clients);
    result_cl[i] = PAMI_Client_create (cl_string, &client[i], NULL, 0);
    if(result_cl[i] != PAMI_SUCCESS) {
      fprintf(stderr, "--->error creating client %d\n", i);
    } else {
      int num = 1;
      result = PAMI_Context_createv (client[i], NULL, 0, &context[i], num);
      if (result != PAMI_SUCCESS)
      {
        fprintf (stderr, "Error. Unable to create pami context(s). result = %d\n", result);
        return 1;
      }
    }
  }
  for(i=0;i<max_clients;i++)
  {
    if (result_cl[i] == PAMI_SUCCESS) {
       result = PAMI_Context_destroyv (&context[i], 1);
       if (result != PAMI_SUCCESS)
       {
          fprintf (stderr, "Error. Unable to destroy the pami context(s). result = %d\n", result);
          return 1;
       }
       fprintf (stdout, "Destroying Client %d of %d\n", i, max_clients);
       result = PAMI_Client_destroy(&client[i]);

       if(result!=PAMI_SUCCESS)
         fprintf(stderr, "--->error destroying client %d\n", i);
     }
  }
  return 0;
}
Example No. 3
int main(int argc, char* argv[])
{
  pami_result_t result = PAMI_ERROR;

  /* initialize the second client */
  char * clientname = "";
  pami_client_t client;
  result = PAMI_Client_create(clientname, &client, NULL, 0);
  TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Client_create");

  /* query properties of the client */
  pami_configuration_t config[4];
  config[0].name = PAMI_CLIENT_NUM_TASKS;
  config[1].name = PAMI_CLIENT_TASK_ID;
  config[2].name = PAMI_CLIENT_NUM_CONTEXTS;
  config[3].name = PAMI_CLIENT_NUM_LOCAL_TASKS;
  result = PAMI_Client_query(client, config, 4);
  TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Client_query");
  const size_t world_size      = config[0].value.intval;
  const size_t world_rank      = config[1].value.intval;
  const size_t num_contexts    = config[2].value.intval;
  const size_t num_local_tasks = config[3].value.intval;
  TEST_ASSERT(num_contexts>1,"num_contexts>1");

  int ppn    = (int)num_local_tasks;
  int nnodes = world_size/ppn;
  int mycore = world_rank%ppn;
  int mynode = (world_rank-mycore)/ppn;

  if (world_rank==0)
  {
    printf("hello world from rank %ld of %ld, node %d of %d, core %d of %d \n", 
           world_rank, world_size, mynode, nnodes, mycore, ppn );
    fflush(stdout);
  }

  /* initialize the contexts */
  contexts = (pami_context_t *) safemalloc( num_contexts * sizeof(pami_context_t) );

  result = PAMI_Context_createv( client, NULL, 0, contexts, num_contexts );
  TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Context_createv");

  /* setup the world geometry */
  pami_geometry_t world_geometry;
  result = PAMI_Geometry_world(client, &world_geometry );
  TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Geometry_world");

#ifdef PROGRESS_THREAD
  int status = pthread_create(&Progress_thread, NULL, &Progress_function, NULL);
  TEST_ASSERT(status==0, "pthread_create");
#endif

  /************************************************************************/

  for (int n=1; n<=(256*1024); n*=2)
  {
    if (world_rank==0) 
    {
        printf("starting n = %d \n", n);
        fflush(stdout);
    }

    result = barrier(world_geometry, contexts[0]);
    TEST_ASSERT(result == PAMI_SUCCESS,"barrier");

    double * sbuf = safemalloc(world_size*n*sizeof(double));
    double * rbuf = safemalloc(world_size*n*sizeof(double));

    for (int s=0; s<world_size; s++ )
      for (int k=0; k<n; k++)
        sbuf[s*n+k] = world_rank*n+k;

    for (int s=0; s<world_size; s++ )
      for (int k=0; k<n; k++)
        rbuf[s*n+k] = -1.0;

    result = barrier(world_geometry, contexts[0]);
    TEST_ASSERT(result == PAMI_SUCCESS,"barrier");

    size_t bytes = world_size * n * sizeof(double), bytes_out;

    pami_memregion_t shared_mr;
    result = PAMI_Memregion_create(contexts[1], rbuf, bytes, &bytes_out, &shared_mr);
    TEST_ASSERT(result == PAMI_SUCCESS && bytes==bytes_out,"PAMI_Memregion_create");
 
    pami_memregion_t local_mr;
    result = PAMI_Memregion_create(contexts[0], sbuf, bytes, &bytes_out, &local_mr);
    TEST_ASSERT(result == PAMI_SUCCESS && bytes==bytes_out,"PAMI_Memregion_create");
 
    result = barrier(world_geometry, contexts[0]);
    TEST_ASSERT(result == PAMI_SUCCESS,"barrier");

    pami_endpoint_t * target_eps = (pami_endpoint_t *) safemalloc( world_size * sizeof(pami_endpoint_t) );
    for (int target=0; target<world_size; target++)
    {
        result = PAMI_Endpoint_create(client, (pami_task_t) target, 1 /* async context*/, &(target_eps[target]) );
        TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Endpoint_create");
    }

    result = barrier(world_geometry, contexts[0]);
    TEST_ASSERT(result == PAMI_SUCCESS,"barrier");

    pami_memregion_t * shmrs = (pami_memregion_t *) safemalloc( world_size * sizeof(pami_memregion_t) );

    result = allgather(world_geometry, contexts[0], sizeof(pami_memregion_t), &shared_mr, shmrs);
    TEST_ASSERT(result == PAMI_SUCCESS,"allgather");

    if (world_rank==0) 
    {
        printf("starting A2A \n");
        fflush(stdout);
    }

    result = barrier(world_geometry, contexts[0]);
    TEST_ASSERT(result == PAMI_SUCCESS,"barrier");

#ifdef SEPARATE_COMPLETION
    done_t active = { .local  = world_size, 
                      .remote = world_size  };
#else
    int active = world_size;
#endif

    uint64_t t0 = GetTimeBase();

    for (int count=0; count<world_size; count++)
    {
        int t = world_rank+count;
        int target = t%world_size;

        //printf("%ld: attempting Rput to %ld (bytes=%ld,loff=%ld, roff=%ld) \n", 
        //       (long)world_rank, (long)target, bytes, n*sizeof(double),
        //       target*n*sizeof(double), world_rank*n*sizeof(double));
        //printf("%ld: attempting Rput to %ld \n", (long)world_rank, (long)target),
        //fflush(stdout);


        pami_rput_simple_t parameters;
        parameters.rma.dest           = target_eps[target];
        //parameters.rma.hints          = ;
        parameters.rma.bytes          = n*sizeof(double);
        parameters.rma.cookie         = &active;
#ifdef SEPARATE_COMPLETION
        parameters.rma.done_fn        = cb_done_local;
        parameters.put.rdone_fn       = cb_done_remote;
#else
        parameters.rma.done_fn        = NULL;
        parameters.put.rdone_fn       = cb_done;
#endif
        parameters.rdma.local.mr      = &local_mr;
        parameters.rdma.local.offset  = target*n*sizeof(double);
        parameters.rdma.remote.mr     = &shmrs[target];
        parameters.rdma.remote.offset = world_rank*n*sizeof(double);

        result = PAMI_Rput(contexts[0], &parameters);
        TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Rput");
    }

#ifdef SEPARATE_COMPLETION
    while (active.local>0)
    {
      result = PAMI_Context_trylock_advancev(&(contexts[0]), 1, 1000);
      TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Context_trylock_advancev");
    }
#endif

    uint64_t t1 = GetTimeBase();
    double  dt1 = (t1-t0)*tic;

#ifdef SEPARATE_COMPLETION
    while (active.remote>0)
#else
    while (active>0)
#endif
    {
      result = PAMI_Context_trylock_advancev(&(contexts[0]), 1, 1000);
      TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Context_trylock_advancev");
    }

    uint64_t t2 = GetTimeBase();
    double  dt2 = (t2-t0)*tic;

    result = barrier(world_geometry, contexts[0]);
    TEST_ASSERT(result == PAMI_SUCCESS,"barrier");

    double megabytes = 1.e-6*bytes;

    printf("%ld: PAMI_Rput A2A: %ld bytes per rank, local %lf seconds (%lf MB/s), remote %lf seconds (%lf MB/s) \n", 
           (long)world_rank, n*sizeof(double), 
           dt1, megabytes/dt1,
           dt2, megabytes/dt2 );
    fflush(stdout);

    result = barrier(world_geometry, contexts[0]);
    TEST_ASSERT(result == PAMI_SUCCESS,"barrier");

    for (int s=0; s<world_size; s++ )
      for (int k=0; k<n; k++)
      {
        if (rbuf[s*n+k]!=(1.0*s*n+1.0*k))
          printf("%4d: rbuf[%d] = %lf (%lf) \n", (int)world_rank, s*n+k, rbuf[s*n+k], (1.0*s*n+1.0*k) );
      }
    fflush(stdout);

    result = barrier(world_geometry, contexts[0]);
    TEST_ASSERT(result == PAMI_SUCCESS,"barrier");

    result = PAMI_Memregion_destroy(contexts[1], &shared_mr);
    TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Memregion_destroy");
 
    result = PAMI_Memregion_destroy(contexts[0], &local_mr);
    TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Memregion_destroy");
 
    free(target_eps);
    free(shmrs);
    free(rbuf);
    free(sbuf);
  }

  /************************************************************************/

#ifdef PROGRESS_THREAD
  void * rv;

  status = pthread_cancel(Progress_thread);
  TEST_ASSERT(status==0, "pthread_cancel");

  status = pthread_join(Progress_thread, &rv);
  TEST_ASSERT(status==0, "pthread_join");
#endif

  result = barrier(world_geometry, contexts[0]);
  TEST_ASSERT(result == PAMI_SUCCESS,"barrier");

  /* finalize the contexts */
  result = PAMI_Context_destroyv( contexts, num_contexts );
  TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Context_destroyv");

  free(contexts);

  /* finalize the client */
  result = PAMI_Client_destroy( &client );
  TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Client_destroy");

  if (world_rank==0)
    printf("%ld: end of test \n", world_rank );
  fflush(stdout);

  return 0;
}
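Example No. 3 (and Examples No. 11, 12 and 14 below) call TEST_ASSERT() and safemalloc() without defining them. A minimal sketch of what such helpers could look like, assuming they simply print a message and abort on failure:

#include <stdio.h>
#include <stdlib.h>

/* Abort with a message when the condition does not hold. */
#define TEST_ASSERT(c, msg)                                              \
  do {                                                                   \
    if (!(c)) {                                                          \
      fprintf (stderr, "%s FAILED at %s:%d\n", msg, __FILE__, __LINE__); \
      exit (1);                                                          \
    }                                                                    \
  } while (0)

/* malloc() wrapper that never returns NULL. */
static void * safemalloc (size_t bytes)
{
  void * ptr = malloc (bytes);
  if (ptr == NULL)
  {
    fprintf (stderr, "malloc of %zu bytes failed\n", bytes);
    exit (1);
  }
  return ptr;
}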
Example No. 4
int main(int argc, char ** argv)
{
  pami_client_t     client;
  pami_context_t    context;
  pami_result_t     status = PAMI_ERROR;
  
  /* create PAMI client */
  RC( PAMI_Client_create("TEST", &client, NULL, 0) );
  DBG_FPRINTF((stderr,"Client created successfully at 0x%p\n",client));

  /* create PAMI context */
  RC( PAMI_Context_createv(client, NULL, 0, &context, 1) );
  DBG_FPRINTF((stderr,"Context created successfully at 0x%p\n",context));

  /* ------------------------------------------------------------------------ */

  pami_extension_t      extension;
  const char            ext_name[] = "EXT_hfi_extension";
  const char            sym_name[] = "hfi_pkt_counters";
  hfi_pkt_counters_fn   hfi_counters = NULL;
  hfi_pkt_counter_t     pkt_counter;

  /* open PAMI extension */
  RC( PAMI_Extension_open (client, ext_name, &extension) );
  DBG_FPRINTF((stderr,"Open %s successfully.\n", ext_name));

  /* load PAMI extension function */
  hfi_counters = (hfi_pkt_counters_fn) 
      PAMI_Extension_symbol (extension, sym_name);
  if (hfi_counters == (void *)NULL)
  {
    fprintf (stderr, "Error. Failed to load %s function in %s\n",
             sym_name, ext_name); 
    return 1;
  } 
  DBG_FPRINTF((stderr,"Loaded function %s in %s successfully.\n", 
              sym_name, ext_name));

  /* invoke PAMI extension function */
  RC( hfi_counters(context, &pkt_counter) );
  DBG_FPRINTF((stderr,"Function %s invoked successfully.\n", 
              sym_name));
  printf( "Pkt sent =         %lu\n"
          "Pkt sent dropped = %lu\n"
          "Ind pkt sent =     %lu\n"
          "Pkt recv =         %lu\n"
          "Pkt recv dropped = %lu\n"
          "Ind pkt recv =     %lu\n"
          "Imm pkt sent =     %lu\n",
          pkt_counter.total_packets_sent,
          pkt_counter.packets_send_drop,
          pkt_counter.indicate_packet_sent,
          pkt_counter.total_packets_recv,
          pkt_counter.packets_recv_drop,
          pkt_counter.indicate_packet_recv,
          pkt_counter.immediate_packet_sent);

  /* close PAMI extension */
  RC( PAMI_Extension_close (extension) );
  DBG_FPRINTF((stderr,"Close %s successfully.\n", ext_name));

  /* ------------------------------------------------------------------------ */
  /* destroy PAMI context */
  RC( PAMI_Context_destroyv(&context, 1) );
  DBG_FPRINTF((stderr, "PAMI context destroyed successfully\n"));

  /* destroy PAMI client */
  RC( PAMI_Client_destroy(&client) );
  DBG_FPRINTF((stderr, "PAMI client destroyed successfully\n"));

  return 0;
}
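The RC() and DBG_FPRINTF() macros above are local helpers, not part of PAMI. A plausible sketch, assuming RC() aborts on any return other than PAMI_SUCCESS and DBG_FPRINTF() compiles away unless debugging is enabled:

#include <stdio.h>
#include <stdlib.h>

/* Evaluate a PAMI call and abort if it fails. */
#define RC(statement)                                             \
  do {                                                            \
    pami_result_t rc_ = (statement);                              \
    if (rc_ != PAMI_SUCCESS) {                                    \
      fprintf (stderr, "Error. \"%s\" failed, result = %d\n",     \
               #statement, rc_);                                  \
      exit (1);                                                   \
    }                                                             \
  } while (0)

/* Debug print that takes a parenthesized fprintf argument list. */
#ifdef DEBUG
#define DBG_FPRINTF(x) fprintf x
#else
#define DBG_FPRINTF(x)
#endif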
Example No. 5
int main (int argc, char ** argv)
{
    pami_client_t           client;
    pami_context_t          context;
    pami_result_t           result = PAMI_ERROR;
    pami_configuration_t*   configuration = NULL;
    char                    cl_string[] = "TEST";
    pthread_t               threads[MAX_THREAD_NUM];
    int                     i, rc;

    result = PAMI_Client_create (cl_string, &client, NULL, 0);
    if (result != PAMI_SUCCESS)
    {
        fprintf (stderr, "Error. Unable to initialize pami client. result = %d\n", result);
        return 1;
    }

    result = PAMI_Context_createv(client, configuration, 0, &context, 1);
    if (result != PAMI_SUCCESS)
    {
        fprintf (stderr, "Error. Unable to create the pami context. result = %d\n", result);
        return 1;
    }

    /* Test a context lock */
    result = PAMI_Context_lock (context);
    if (result != PAMI_SUCCESS)
    {
        fprintf (stderr, "Error. Unable to lock the pami context. result = %d\n", result);
        return 1;
    }

    /* Test a context unlock */
    result = PAMI_Context_unlock (context);
    if (result != PAMI_SUCCESS)
    {
        fprintf (stderr, "Error. Unable to unlock the pami context. result = %d\n", result);
        return 1;
    }

    // test context trylock with multiple threads
    for (i = 0; i < MAX_THREAD_NUM; i ++) {
        rc = pthread_create(&threads[i], NULL, thread_routine, (void*)context);
        if (rc) {
            fprintf (stderr,
                     "Error. Unable to create %d-th thread with rc %d\n",
                     i, rc);
            return 1;
        }
    }

    // join all the created threads
    for (i = 0; i < MAX_THREAD_NUM; i ++) {
        do {
            rc = pthread_join(threads[i], NULL);
            if (rc) {
                fprintf (stderr,
                         "Error. Unable to join %d-th thread with rc %d\n",
                         i, rc);
            }
        } while (0 != rc);
    }

    // check the common data
    if ( MAX_THREAD_NUM == keys ) {
        fprintf (stderr,
                 "Trylock with multiple threads passed. %d (%d expected)\n",
                 keys, MAX_THREAD_NUM);
    } else {
        fprintf (stderr,
                 "Error. Trylock with multiple threads failed. %d (%d expected)\n",
                 keys, MAX_THREAD_NUM);
    }

    /* Destroy the context */
    result = PAMI_Context_destroyv(&context, 1);
    if (result != PAMI_SUCCESS)
    {
        fprintf (stderr, "Error. Unable to destroy the pami context. result = %d\n", result);
        return 1;
    }

    /* Finalize (destroy) the client */
    result = PAMI_Client_destroy(&client);
    if (result != PAMI_SUCCESS)
    {
        fprintf (stderr, "Error. Unable to finalize pami client. result = %d\n", result);
        return 1;
    }

    fprintf (stderr, "Success.\n");

    return 0;
}
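The thread_routine() function and the shared counter keys are not shown. A minimal sketch, assuming each thread spins on PAMI_Context_trylock(), bumps the counter while it holds the lock, and then unlocks:

static volatile int keys = 0;   /* shared counter checked in main() */

static void * thread_routine (void * arg)
{
  pami_context_t context = (pami_context_t) arg;
  pami_result_t result;

  /* PAMI_Context_trylock() returns PAMI_EAGAIN while another thread holds the lock. */
  do {
    result = PAMI_Context_trylock (context);
  } while (result == PAMI_EAGAIN);

  if (result == PAMI_SUCCESS)
  {
    keys++;                          /* protected by the context lock */
    PAMI_Context_unlock (context);
  }

  return NULL;
}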
Example No. 6
int main(int argc, char ** argv) {
	pami_client_t client;
	pami_context_t context;
	size_t num_contexts = 1;
	pami_task_t task_id;
	size_t num_tasks;
	pami_geometry_t world_geometry;
	/* Barrier variables */
	size_t num_algorithms[2];
	pami_xfer_type_t barrier_xfer = PAMI_XFER_BARRIER;
	pami_xfer_t barrier;
#if !defined(__bgq__)
	pami_extension_t hfi_extension;
	hfi_remote_update_fn hfi_update;
#endif
	int numAlgorithms = 6;
	pami_xfer_type_t algorithms[] = {PAMI_XFER_BROADCAST, PAMI_XFER_BARRIER,
			PAMI_XFER_SCATTER, PAMI_XFER_ALLTOALL, PAMI_XFER_ALLREDUCE, PAMI_XFER_ALLGATHER};
	const char* algorithmNames[] = {"PAMI_XFER_BROADCAST", "PAMI_XFER_BARRIER",
				"PAMI_XFER_SCATTER", "PAMI_XFER_ALLTOALL", "PAMI_XFER_ALLREDUCE", "PAMI_XFER_ALLGATHER"};

	const char    *name = "X10";
	setenv("MP_MSG_API", name, 1);
	pami_configuration_t config;
	config.name = PAMI_GEOMETRY_OPTIMIZE;
	config.value.intval = 1;

	pami_result_t status = PAMI_Client_create(name, &client, &config, 1);
	if (status != PAMI_SUCCESS)
		error("Unable to initialize PAMI client\n");

	if ((status = PAMI_Context_createv(client, &config, 1, &context, 1)) != PAMI_SUCCESS)
		error("Unable to initialize the PAMI context: %i\n", status);

#if !defined(__bgq__)
	pami_result_t hfi_status = PAMI_Extension_open (client, "EXT_hfi_extension", &hfi_extension);
	if (hfi_status == PAMI_SUCCESS)
	{
		#ifdef __GNUC__
		__extension__
		#endif
		hfi_update = (hfi_remote_update_fn) PAMI_Extension_symbol(hfi_extension, "hfi_remote_update"); // This may succeed even if HFI is not available
	}
#endif

	status = PAMI_Geometry_world(client, &world_geometry);
	if (status != PAMI_SUCCESS)
		error("Unable to create the world geometry");

	pami_configuration_t configuration[2];
	configuration[0].name = PAMI_CLIENT_TASK_ID;
	configuration[1].name = PAMI_CLIENT_NUM_TASKS;
	if ((status = PAMI_Client_query(client, configuration, 2)) != PAMI_SUCCESS)
		error("Unable to query the PAMI_CLIENT: %i\n", status);
	int myPlaceId = configuration[0].value.intval;

	if (myPlaceId == 0)
	{
		for (int i = 0; i < numAlgorithms; i++) {
			status = PAMI_Geometry_algorithms_num(world_geometry, algorithms[i], num_algorithms);
			if (status != PAMI_SUCCESS || num_algorithms[0] == 0)
				error("Unable to query the algorithm counts for barrier\n");

			// query what the different algorithms are
			pami_algorithm_t *always_works_alg = (pami_algorithm_t*) alloca(sizeof(pami_algorithm_t)*num_algorithms[0]);
			pami_metadata_t	*always_works_md = (pami_metadata_t*) alloca(sizeof(pami_metadata_t)*num_algorithms[0]);
			pami_algorithm_t *must_query_alg = (pami_algorithm_t*) alloca(sizeof(pami_algorithm_t)*num_algorithms[1]);
			pami_metadata_t	*must_query_md = (pami_metadata_t*) alloca(sizeof(pami_metadata_t)*num_algorithms[1]);
			status = PAMI_Geometry_algorithms_query(world_geometry, algorithms[i], always_works_alg,
					always_works_md, num_algorithms[0], must_query_alg, must_query_md, num_algorithms[1]);
			if (status != PAMI_SUCCESS)
				error("Unable to query the supported algorithm %s for world", algorithmNames[i]);

			// print out
			printf("Collective: %s\n", algorithmNames[i]);
			printf("Always supported: %i algorithms\n", num_algorithms[0]);
			for (int j=0; j<num_algorithms[0]; j++)
				printf("\t%s = %i\n", always_works_md[j].name, j);
			printf("Locally supported: %i algorithms\n", num_algorithms[1]);
			for (int j=0; j<num_algorithms[1]; j++)
				printf("\t%s = %i\n", must_query_md[j].name, num_algorithms[0]+j);
			printf("\n");
		}
	}

#if !defined(__bgq__)
	if (hfi_status == PAMI_SUCCESS)
		PAMI_Extension_close (hfi_extension);
#endif

	if ((status = PAMI_Context_destroyv(&context, 1)) != PAMI_SUCCESS)
		fprintf(stderr, "Error closing PAMI context: %i\n", status);
	if ((status = PAMI_Client_destroy(&client)) != PAMI_SUCCESS)
		fprintf(stderr, "Error closing PAMI client: %i\n", status);
	return 0;
}
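error() is a local helper rather than a PAMI call. A sketch, assuming it behaves like fprintf(stderr, ...) followed by exit():

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>

/* Print a formatted message to stderr and terminate the program. */
static void error (const char * format, ...)
{
  va_list args;
  va_start (args, format);
  vfprintf (stderr, format, args);
  va_end (args);
  exit (1);
}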
Example No. 7
int main(int argc, char ** argv)
{
  pami_client_t client;
  pami_context_t context;
  pami_result_t status = PAMI_ERROR;

  status = PAMI_Client_create("TEST", &client, NULL, 0);
  if(status != PAMI_SUCCESS)
  {
    fprintf (stderr, "Error. Unable to initialize pami client. result = %d\n", status);
    return 1;
  }
  DBG_FPRINTF((stderr,"Client %p\n",client));

  status = PAMI_Context_createv(client, NULL, 0, &context, 1);
  if(status != PAMI_SUCCESS)
  {
    fprintf (stderr, "Error. Unable to create pami context. result = %d\n", status);
    return 1;
  }

  /* ------------------------------------------------------------------------ */

  pami_extension_t extension;
  status = PAMI_Extension_open (client, "EXT_torus_network", &extension);
  if(status != PAMI_SUCCESS)
  {
    fprintf (stderr, "Error. The \"EXT_torus_network\" extension is not implemented. result = %d\n", status);
    return 1;
  }

  pami_extension_torus_information_fn pamix_torus_info =
    (pami_extension_torus_information_fn) PAMI_Extension_symbol (extension, "information");
  if (pamix_torus_info == (void *)NULL)
  {
    fprintf (stderr, "Error. The \"EXT_torus_network\" extension function \"information\" is not implemented. result = %d\n", status);
    return 1;
  }

  const pami_extension_torus_information_t * info = pamix_torus_info ();

  fprintf (stdout, "Torus Dimensions:  %zu\n", info->dims);

  char str[1024];
  size_t i, nchars;

  for (nchars=i=0; i<(info->dims-1); i++)
    nchars += snprintf (&str[nchars],1023-nchars, "%zu,", info->coord[i]);
  nchars += snprintf (&str[nchars],1023-nchars, "%zu", info->coord[info->dims-1]);
  fprintf (stdout, "Torus Coordinates: [%s]\n", str);

  for (nchars=i=0; i<(info->dims-1); i++)
    nchars += snprintf (&str[nchars],1023-nchars, "%zu,", info->size[i]);
  nchars += snprintf (&str[nchars],1023-nchars, "%zu", info->size[info->dims-1]);
  fprintf (stdout, "Torus Size:        [%s]\n", str);

  for (nchars=i=0; i<(info->dims-1); i++)
    nchars += snprintf (&str[nchars],1023-nchars, "%zu,", info->torus[i]);
  nchars += snprintf (&str[nchars],1023-nchars, "%zu", info->torus[info->dims-1]);
  fprintf (stdout, "Torus Wrap:        [%s]\n", str);



  pami_extension_torus_task2torus_fn pamix_torus_task2torus =
    (pami_extension_torus_task2torus_fn) PAMI_Extension_symbol (extension, "task2torus");
  if (pamix_torus_task2torus == (void *)NULL)
  {
    fprintf (stderr, "Error. The \"EXT_torus_network\" extension function \"task2torus\" is not implemented. result = %d\n", status);
    return 1;
  }

  pami_task_t task = 1;
  size_t * coord = (size_t *) malloc (sizeof(size_t) * info->dims);
  status = pamix_torus_task2torus (task, coord);
  if (status != PAMI_SUCCESS)
  {
    fprintf (stderr, "Error.  Unable to query the torus coordinates of task 1\n");
    return 1;
  }

  for (nchars=i=0; i<(info->dims-1); i++)
    nchars += snprintf (&str[nchars],1023-nchars, "%zu,", coord[i]);
  nchars += snprintf (&str[nchars],1023-nchars, "%zu", coord[i]);
  fprintf (stdout, "Task 1 Torus Coordinates:     [%s]\n", str);


  pami_extension_torus_torus2task_fn pamix_torus_torus2task =
    (pami_extension_torus_torus2task_fn) PAMI_Extension_symbol (extension, "torus2task");
  if (pamix_torus_torus2task == (void *)NULL)
  {
    fprintf (stderr, "Error. The \"EXT_torus_network\" extension function \"torus2task\" is not implemented. result = %d\n", status);
    return 1;
  }

  /*coord[0] = 0; */
  /*coord[1] = 0; */
  /*coord[2] = 0; */
  /*coord[3] = 1; */
  status = pamix_torus_torus2task (coord, &task);
  if (status != PAMI_SUCCESS)
  {
    fprintf (stderr, "Error.  Unable to query the task for coordinates [%zu,%zu,%zu,%zu]\n",coord[0],coord[1],coord[2],coord[3]);
    return 1;
  }
  for (nchars=i=0; i<(info->dims-1); i++)
    nchars += snprintf (&str[nchars],1023-nchars, "%zu,", coord[i]);
  nchars += snprintf (&str[nchars],1023-nchars, "%zu", coord[i]);
  fprintf (stdout, "Task at Torus Coordinates [%s]: %d\n", str, task);;






  status = PAMI_Extension_close (extension);
  if(status != PAMI_SUCCESS)
  {
    fprintf (stderr, "Error. The \"EXT_torus_network\" extension could not be closed. result = %d\n", status);
    return 1;
  }


  /* ------------------------------------------------------------------------ */
  DBG_FPRINTF((stderr, "PAMI_Context_destroyv(&context, 1);\n"));
  status = PAMI_Context_destroyv(&context, 1);
  if(status != PAMI_SUCCESS)
  {
    fprintf(stderr, "Error. Unable to destroy pami context. result = %d\n", status);
    return 1;
  }

  DBG_FPRINTF((stderr, "PAMI_Client_destroy(&client);\n"));
  status = PAMI_Client_destroy(&client);
  if(status != PAMI_SUCCESS)
  {
    fprintf(stderr, "Error. Unable to finalize pami client. result = %d\n", status);
    return 1;
  }

  DBG_FPRINTF((stderr, "return 0;\n"));
  return 0;
}
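The three nearly identical loops that format the coordinate, size and wrap vectors could be collapsed into one helper; a small sketch (plain C, no PAMI assumptions):

#include <stdio.h>

/* Format a size_t vector as "a,b,c" into a caller-provided buffer. */
static void format_coords (char * str, size_t len, const size_t * vec, size_t dims)
{
  size_t i, nchars = 0;
  str[0] = '\0';
  for (i = 0; i < dims && nchars < len; i++)
    nchars += snprintf (&str[nchars], len - nchars, "%zu%s",
                        vec[i], (i + 1 < dims) ? "," : "");
}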
Example No. 8
int main ()
{
  pami_client_t client;
  pami_context_t *context;
  pami_result_t result;
  pami_configuration_t configuration;
  
  PAMI_Client_create ("TEST", &client, NULL, 0);
  
  configuration.name = PAMI_CLIENT_NUM_CONTEXTS;
  result = PAMI_Client_query(client, &configuration, 1);
  size_t num = configuration.value.intval;
  context = (pami_context_t*) malloc (num*sizeof(pami_context_t));
  
  /* Create four contexts - every task creates the same number */
  PAMI_Context_createv (client, NULL, 0, context, num);

  createEndpointTable (client, num);


  pami_dispatch_callback_function fn;
  fn.p2p = test_dispatch;
  pami_dispatch_hint_t options = {0};
  pami_send_hint_t hints = {0};
  volatile size_t expect = 0;

  size_t i;
  for (i=0; i<num; i++)
  {
    PAMI_Context_lock (context[i]);
    result = PAMI_Dispatch_set (context[i], 0, fn, (void *)&expect, options);
    if (result != PAMI_SUCCESS)
    {
      fprintf (stderr, "Error. Unable register pami dispatch. result = %d\n", result);
      return 1;
    }
  }

  configuration.name = PAMI_CLIENT_TASK_ID;
  result = PAMI_Client_query(client, &configuration, 1);
  if (result != PAMI_SUCCESS)
  {
    fprintf (stderr, "Error. Unable query configuration (%d). result = %d\n", configuration.name, result);
    return 1;
  }
  pami_task_t task_id = configuration.value.intval;
  fprintf (stderr, "My task id = %d\n", task_id);

  configuration.name = PAMI_CLIENT_NUM_TASKS;
  result = PAMI_Client_query(client, &configuration, 1);
  if (result != PAMI_SUCCESS)
  {
    fprintf (stderr, "Error. Unable query configuration (%d). result = %d\n", configuration.name, result);
    return 1;
  }
  size_t num_tasks = configuration.value.intval;
  fprintf (stderr, "Number of tasks = %zu\n", num_tasks);

  uint8_t header[16];
  uint8_t data[1024];
  volatile size_t active = 1;

  pami_send_t parameters;
  parameters.send.dispatch        = 0;
  parameters.send.header.iov_base = header;
  parameters.send.header.iov_len  = 16;
  parameters.send.data.iov_base   = data;
  parameters.send.data.iov_len    = 1024;
  parameters.send.hints           = hints;
  parameters.events.cookie        = (void *) &active;
  parameters.events.local_fn      = decrement;
  parameters.events.remote_fn     = NULL;

  /* Send a message to endpoint "num_tasks * num_contexts - 1" */
  pami_task_t target_task = (pami_task_t) -1;
  size_t      target_offset = (size_t) -1;
  PAMI_Endpoint_query (_endpoint[num*num_tasks-1], &target_task, &target_offset);
  if (task_id == target_task) expect += num_tasks;
  send_endpoint (context[0], num*num_tasks-1, &parameters);

  fprintf (stdout, "before advance, active = %zu, expect = %zu\n", active, expect);
  while ((active + expect) > 0) PAMI_Context_advancev (context, num, 100);

  for (i=0; i<num; i++) PAMI_Context_unlock (context[i]);

  result = PAMI_Context_destroyv (context, num);
  if (result != PAMI_SUCCESS)
  {
    fprintf (stderr, "Error. Unable to destroy pami context. result = %d\n", result);
    return 1;
  }

  result = PAMI_Client_destroy (&client);
  if (result != PAMI_SUCCESS)
  {
    fprintf (stderr, "Error. Unable to finalize pami client. result = %d\n", result);
    return 1;
  }

  fprintf (stdout, "Success (%d)\n", task_id);

  return 0;
}
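The decrement() completion callback registered above is not shown; it follows the standard PAMI event-function signature and simply decrements the counter passed in through the cookie. A sketch (createEndpointTable() and send_endpoint() are likewise local helpers that build and use a table of endpoints, one per task/context pair):

/* Called by PAMI when the send (or receive) completes. */
static void decrement (pami_context_t context, void * cookie, pami_result_t result)
{
  volatile size_t * value = (volatile size_t *) cookie;
  (*value)--;
}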
Example No. 9
int main (int argc, char ** argv)
{
  /*volatile size_t send_active = 2; */
  volatile size_t send_active = 1;
  volatile size_t recv_active = 1;


  pami_client_t client;
  pami_context_t context;
  char                  cl_string[] = "TEST";
  pami_result_t result = PAMI_ERROR;

    fprintf (stderr, "Before Client initialize\n");
  result = PAMI_Client_create (cl_string, &client, NULL, 0);
  if (result != PAMI_SUCCESS)
  {
    fprintf (stderr, "Error. Unable to initialize pami client. result = %d\n", result);
    return 1;
  }
    fprintf (stderr, "After Client initialize\n");

    fprintf (stderr, "before context createv\n");
        {  result = PAMI_Context_createv(client, NULL, 0, &context, 1); }
  if (result != PAMI_SUCCESS)
  {
    fprintf (stderr, "Error. Unable to create pami context. result = %d\n", result);
    return 1;
  }
    fprintf (stderr, "after context createv\n");

  pami_configuration_t configuration;

  configuration.name = PAMI_CLIENT_TASK_ID;
  result = PAMI_Client_query(client, &configuration,1);
  if (result != PAMI_SUCCESS)
  {
    fprintf (stderr, "Error. Unable query configuration (%d). result = %d\n", configuration.name, result);
    return 1;
  }
  size_t task_id = configuration.value.intval;
  fprintf (stderr, "My task id = %zu\n", task_id);

  configuration.name = PAMI_CLIENT_NUM_TASKS;
  result = PAMI_Client_query(client, &configuration,1);
  if (result != PAMI_SUCCESS)
  {
    fprintf (stderr, "Error. Unable query configuration (%d). result = %d\n", configuration.name, result);
    return 1;
  }
  /*size_t num_tasks = configuration.value.intval; */

  size_t dispatch = 0;
  pami_dispatch_callback_function fn;
  fn.p2p = test_dispatch;
  pami_dispatch_hint_t options={};
  fprintf (stderr, "Before PAMI_Dispatch_set() .. &recv_active = %p, recv_active = %zu\n", &recv_active, recv_active);
  result = PAMI_Dispatch_set (context,
                             dispatch,
                             fn,
                             (void *)&recv_active,
                             options);
  if (result != PAMI_SUCCESS)
  {
    fprintf (stderr, "Error. Unable register pami dispatch. result = %d\n", result);
    return 1;
  }

  pami_send_t parameters;
  parameters.send.dispatch        = dispatch;
  parameters.send.header.iov_base = (void *)&dispatch; /* send *something* */
  parameters.send.header.iov_len  = sizeof(size_t);
  parameters.send.data.iov_base   = (void *)&dispatch; /* send *something* */
  parameters.send.data.iov_len    = sizeof(size_t);
  parameters.events.cookie    = (void *) &send_active;
  parameters.events.local_fn  = send_done_local;
  /*parameters.events.remote_fn = send_done_remote; */
  parameters.events.remote_fn = NULL;
#if 1
  int iter;
  for (iter=0; iter < 100; iter++)
  {
    fprintf (stderr, "before send ...\n");
    PAMI_Endpoint_create (client, task_id, 0, &parameters.send.dest);
    result = PAMI_Send (context, &parameters);
    fprintf (stderr, "... after send.\n");

    fprintf (stderr, "before send-recv advance loop (send_active = %zu, recv_active = %zu) ...\n", send_active, recv_active);
    while (send_active || recv_active)
    {
      result = PAMI_Context_advance (context, 100);
      if ( (result != PAMI_SUCCESS) && (result != PAMI_EAGAIN) )
      {
        fprintf (stderr, "Error. Unable to advance pami context. result = %d\n", result);
        return 1;
      }
    }
    fprintf (stderr, "... after send-recv advance loop\n");
        send_active = recv_active = 1;
  }
#endif
  result = PAMI_Context_destroyv(&context, 1);
  if (result != PAMI_SUCCESS)
  {
    fprintf (stderr, "Error. Unable to destroy pami context. result = %d\n", result);
    return 1;
  }

  result = PAMI_Client_destroy(&client);
  if (result != PAMI_SUCCESS)
  {
    fprintf (stderr, "Error. Unable to finalize pami client. result = %d\n", result);
    return 1;
  }

  return 0;
}
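The test_dispatch() receive handler is not shown. Below is a minimal sketch, assuming the standard pami_dispatch_p2p_function signature and pami_recv_t layout: immediate receives (pipe_addr != NULL) are counted right away, while asynchronous receives are directed into a hypothetical static buffer with a completion callback. send_done_local() would follow the same pattern as the decrement() sketch after Example No. 8.

static char recv_buffer[1024];   /* hypothetical landing buffer for async receives */

static void recv_done (pami_context_t context, void * cookie, pami_result_t result)
{
  volatile size_t * active = (volatile size_t *) cookie;
  (*active)--;
}

static void test_dispatch (pami_context_t   context,
                           void           * cookie,       /* &recv_active */
                           const void     * header_addr,
                           size_t           header_size,
                           const void     * pipe_addr,    /* non-NULL for immediate receives */
                           size_t           data_size,
                           pami_endpoint_t  origin,
                           pami_recv_t    * recv)
{
  volatile size_t * active = (volatile size_t *) cookie;

  if (pipe_addr != NULL)
  {
    /* Immediate receive: data_size bytes are already available at pipe_addr. */
    (*active)--;
  }
  else
  {
    /* Asynchronous receive: tell PAMI where to put the data and what to call
       when all of it has arrived. */
    recv->cookie      = cookie;
    recv->local_fn    = recv_done;
    recv->addr        = recv_buffer;
    recv->type        = PAMI_TYPE_BYTE;
    recv->offset      = 0;
    recv->data_fn     = PAMI_DATA_COPY;
    recv->data_cookie = NULL;
  }
}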
Example No. 10
int main (int argc, char ** argv)
{
  pami_client_t client;
  pami_context_t context;
  pami_task_t task;
  size_t size;
  pami_dispatch_callback_function fn;
  pami_dispatch_hint_t options = {0};
  
  pami_result_t result = PAMI_ERROR;

  /* ====== INITIALIZE ====== */

  result = PAMI_Client_create ("TEST", &client, NULL, 0);
  if (result != PAMI_SUCCESS)
  {
    fprintf (stderr, "Error. Unable to initialize pami client. result = %d\n", result);
    return 1;
  }

  task = client_task (client);
  size = client_size (client);
  
  result = PAMI_Context_createv (client, NULL, 0, &context, 1);
  if (result != PAMI_SUCCESS)
  {
    fprintf (stderr, "Error. Unable to create pami context. result = %d\n", result);
    return 1;
  }
  
  
  fn.p2p = dispatch_fn;
  
  options.recv_immediate = PAMI_HINT_DEFAULT;
  result = PAMI_Dispatch_set (context,
                              DISPATCH_ID_DEFAULT_EXPECT_IMMEDIATE,
                              fn,
                              (void *) EXPECT_IMMEDIATE,
                              options);
  if (result != PAMI_SUCCESS)
  {
    fprintf (stderr, "Error. Unable to register DISPATCH_ID_DEFAULT_EXPECT_IMMEDIATE. result = %d\n", result);
    return 1;
  }

  options.recv_immediate = PAMI_HINT_DEFAULT;
  result = PAMI_Dispatch_set (context,
                              DISPATCH_ID_DEFAULT_EXPECT_ASYNC,
                              fn,
                              (void *) EXPECT_ASYNC,
                              options);
  if (result != PAMI_SUCCESS)
  {
    fprintf (stderr, "Error. Unable to register DISPATCH_ID_DEFAULT_EXPECT_ASYNC. result = %d\n", result);
    return 1;
  }

  options.recv_immediate = PAMI_HINT_ENABLE;
  result = PAMI_Dispatch_set (context,
                              DISPATCH_ID_ENABLE,
                              fn,
                              (void *) EXPECT_IMMEDIATE,
                              options);
  if (result != PAMI_SUCCESS)
  {
    fprintf (stderr, "Error. Unable to register DISPATCH_ID_ENABLE. result = %d\n", result);
    return 1;
  }

  options.recv_immediate = PAMI_HINT_DISABLE;
  result = PAMI_Dispatch_set (context,
                              DISPATCH_ID_DISABLE,
                              fn,
                              (void *) EXPECT_ASYNC,
                              options);
  if (result != PAMI_SUCCESS)
  {
    fprintf (stderr, "Error. Unable to register DISPATCH_ID_DISABLE. result = %d\n", result);
    return 1;
  }


  /* ====== START TEST ====== */
  
  __test_errors = 0;
  __test_recvs  = 0;
  
  size_t test_count = 0;
  
  volatile size_t send_active = 0;
  
  pami_send_t parameters;
  parameters.send.header.iov_base = __junk;
  parameters.send.header.iov_len  = 0;
  parameters.send.data.iov_base   = __junk;
  parameters.send.data.iov_len    = 0;
  parameters.send.dispatch        = 0;
  parameters.events.cookie        = (void *) & send_active;
  parameters.events.local_fn      = decrement;
  parameters.events.remote_fn     = NULL;

  result = PAMI_Endpoint_create (client, 0, 0, &parameters.send.dest);
  if (result != PAMI_SUCCESS)
  {
    fprintf (stderr, "Error:  PAMI_Endpoint_create() failed for task 0, context 0 with %d.\n", result);
	  return 1;
  }
  
  /* ===================================================================
   * 'recv_immediate' default
   * 
   * (header+data) > recv_immediate_max MUST be an asynchronous receive
   * 
   * A zero-byte send will \b always result in an immediate receive.
   * \see pami_dispatch_p2p_function
   * 
   * Data sizes to test:
   *  - recv_immediate_max + 1
   *  - 0
   */

  parameters.send.data.iov_len    = recv_immediate_max (context, DISPATCH_ID_DEFAULT_EXPECT_ASYNC) + 1;
  parameters.send.dispatch        = DISPATCH_ID_DEFAULT_EXPECT_ASYNC;
  
  test_count++;
  if (task == 1)
  {
    send_active++;
    result = PAMI_Send (context, &parameters);
    if (result != PAMI_SUCCESS)
    {
      fprintf (stderr, "Error:  Unable to send to 0x%08x using dispatch %zu with %d.\n", parameters.send.dest, parameters.send.dispatch, result);
	    return 1;
    }
  }

  parameters.send.data.iov_len    = 0;
  parameters.send.dispatch        = DISPATCH_ID_DEFAULT_EXPECT_IMMEDIATE;
  
  test_count++;
  if (task == 1)
  {
    send_active++;
    result = PAMI_Send (context, &parameters);
    if (result != PAMI_SUCCESS)
    {
      fprintf (stderr, "Error:  Unable to send to 0x%08x using dispatch %zu with %d.\n", parameters.send.dest, parameters.send.dispatch, result);
	    return 1;
    }
  }

  /* ===================================================================
   * 'recv_immediate' enabled
   * 
   * All receives are 'immediate'. (header+data) > recv_immediate_max is
   * invalid, but may not necessarily return an error.
   * 
   * Data sizes to test:
   *  - 0
   *  - recv_immediate_max
   *  - recv_immediate_max + 1   ...... ?
   */
  parameters.send.data.iov_len    = 0;
  parameters.send.dispatch        = DISPATCH_ID_ENABLE;
  
  test_count++;
  if (task == 1)
  {
    send_active++;
    result = PAMI_Send (context, &parameters);
    if (result != PAMI_SUCCESS)
    {
      fprintf (stderr, "Error:  Unable to send to 0x%08x using dispatch %zu with %d.\n", parameters.send.dest, parameters.send.dispatch, result);
	    return 1;
    }
  }

  parameters.send.data.iov_len    = recv_immediate_max (context, DISPATCH_ID_DEFAULT_EXPECT_ASYNC);
  parameters.send.dispatch        = DISPATCH_ID_ENABLE;
  
  test_count++;
  if (task == 1)
  {
    send_active++;
    result = PAMI_Send (context, &parameters);
    if (result != PAMI_SUCCESS)
    {
      fprintf (stderr, "Error:  Unable to send to 0x%08x using dispatch %zu with %d.\n", parameters.send.dest, parameters.send.dispatch, result);
	    return 1;
    }
  }

#if 0
  parameters.send.data.iov_len    = recv_immediate_max (context, DISPATCH_ID_DEFAULT_EXPECT_ASYNC) + 1;
  parameters.send.dispatch        = DISPATCH_ID_ENABLE;
  
  test_count++;
  if (task == 1)
  {
    send_active++;
    result = PAMI_Send (context, &parameters);
    if (result != PAMI_SUCCESS)
    {
      fprintf (stderr, "Error:  Unable to send to 0x%08x using dispatch %d with %d.\n", parameters.send.dest, parameters.send.dispatch, result);
	    return 1;
    }
  }
#endif

  /* ===================================================================
   * 'recv_immediate' disabled
   * 
   * All receives are NOT 'immediate' - even "zero byte data"
   * 
   * Data sizes to test:
   *  - 0
   *  - recv_immediate_max
   *  - recv_immediate_max + 1
   */
  parameters.send.data.iov_len    = 0;
  parameters.send.dispatch        = DISPATCH_ID_DISABLE;
  
  test_count++;
  if (task == 1)
  {
    send_active++;
    result = PAMI_Send (context, &parameters);
    if (result != PAMI_SUCCESS)
    {
      fprintf (stderr, "Error:  Unable to send to 0x%08x using dispatch %zu with %d.\n", parameters.send.dest, parameters.send.dispatch, result);
	    return 1;
    }
  }

  parameters.send.data.iov_len    = recv_immediate_max (context, DISPATCH_ID_DEFAULT_EXPECT_ASYNC);
  parameters.send.dispatch        = DISPATCH_ID_DISABLE;
  
  test_count++;
  if (task == 1)
  {
    send_active++;
    result = PAMI_Send (context, &parameters);
    if (result != PAMI_SUCCESS)
    {
      fprintf (stderr, "Error:  Unable to send to 0x%08x using dispatch %zu with %d.\n", parameters.send.dest, parameters.send.dispatch, result);
	    return 1;
    }
  }

  parameters.send.data.iov_len    = recv_immediate_max (context, DISPATCH_ID_DEFAULT_EXPECT_ASYNC) + 1;
  parameters.send.dispatch        = DISPATCH_ID_DISABLE;
  
  test_count++;
  if (task == 1)
  {
    send_active++;
    result = PAMI_Send (context, &parameters);
    if (result != PAMI_SUCCESS)
    {
      fprintf (stderr, "Error:  Unable to send to 0x%08x using dispatch %zu with %d.\n", parameters.send.dest, parameters.send.dispatch, result);
	    return 1;
    }
  }


  /* ====== WAIT FOR COMMUNICATION COMPLETION ====== */
  
  if (task == 0)
  {
    while (__test_recvs < test_count)
      PAMI_Context_advance (context, 1000);
  }
  else if (task == 1)
  {
    while (send_active)
      PAMI_Context_advance (context, 1000);
  }


  /* ====== CLEANUP ====== */

  result = PAMI_Context_destroyv (&context, 1);
  if (result != PAMI_SUCCESS) {
    fprintf (stderr, "Error. Unable to destroy context, result = %d\n", result);
    return 1;
  }

  result = PAMI_Client_destroy (&client);
  if (result != PAMI_SUCCESS)
  {
    fprintf (stderr, "Error. Unable to destroy pami client. result = %d\n", result);
    return 1;
  }

  
  /* ====== REPORT ERRORS ====== */

  if (__test_errors > 0)
  {
    fprintf (stderr, "Error. Non-compliant PAMI receive immediate implementation! error count = %zu\n", __test_errors);
    return 1;
  }
  
  return 0;
}
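client_task(), client_size() and recv_immediate_max() are local wrappers that are not shown. Plausible sketches, assuming recv_immediate_max() queries the PAMI_DISPATCH_RECV_IMMEDIATE_MAX attribute of the dispatch and the other two query the client:

static size_t recv_immediate_max (pami_context_t context, size_t dispatch)
{
  pami_configuration_t configuration;
  configuration.name = PAMI_DISPATCH_RECV_IMMEDIATE_MAX;
  PAMI_Dispatch_query (context, dispatch, &configuration, 1);
  return (size_t) configuration.value.intval;
}

static pami_task_t client_task (pami_client_t client)
{
  pami_configuration_t configuration;
  configuration.name = PAMI_CLIENT_TASK_ID;
  PAMI_Client_query (client, &configuration, 1);
  return (pami_task_t) configuration.value.intval;
}

static size_t client_size (pami_client_t client)
{
  pami_configuration_t configuration;
  configuration.name = PAMI_CLIENT_NUM_TASKS;
  PAMI_Client_query (client, &configuration, 1);
  return (size_t) configuration.value.intval;
}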
Example No. 11
int main(int argc, char* argv[])
{
  pami_result_t result = PAMI_ERROR;

  /* initialize the second client */
  char * clientname = "";
  pami_client_t client;
  result = PAMI_Client_create(clientname, &client, NULL, 0);
  TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Client_create");

  /* query properties of the client */
  pami_configuration_t config[4];
  config[0].name = PAMI_CLIENT_NUM_TASKS;
  config[1].name = PAMI_CLIENT_TASK_ID;
  config[2].name = PAMI_CLIENT_NUM_CONTEXTS;
  config[3].name = PAMI_CLIENT_NUM_LOCAL_TASKS;
  result = PAMI_Client_query(client, config, 4);
  TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Client_query");
  const size_t world_size      = config[0].value.intval;
  const size_t world_rank      = config[1].value.intval;
  const size_t num_contexts    = (config[2].value.intval > 32) ? 32 : config[2].value.intval; /* because I only need 16+16 contexts in c1 mode */
  const size_t num_local_tasks = config[3].value.intval;
  TEST_ASSERT(num_contexts>1,"num_contexts>1");

  const int ppn    = (int)num_local_tasks;
  const int nnodes = world_size/ppn;
  const int mycore = world_rank%ppn;
  const int mynode = (world_rank-mycore)/ppn;

  const int num_sync            = num_contexts/2;
  const int num_async           = num_contexts/2;
  const int async_context_begin = num_sync+1;
  const int async_context_end   = num_contexts;

  if (world_rank==0)
  {
    printf("hello world from rank %ld of %ld, node %d of %d, core %d of %d \n", 
           world_rank, world_size, mynode, nnodes, mycore, ppn );
    printf("num_contexts = %ld, async_context_begin = %d, async_context_end = %d \n",
            num_contexts, async_context_begin, async_context_end);
    fflush(stdout);
  }

  /* initialize the contexts */
  contexts = (pami_context_t *) safemalloc( num_contexts * sizeof(pami_context_t) );

  result = PAMI_Context_createv( client, NULL, 0, contexts, num_contexts );
  TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Context_createv");

  /* setup the world geometry */
  pami_geometry_t world_geometry;
  result = PAMI_Geometry_world(client, &world_geometry );
  TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Geometry_world");

  /************************************************************************/

  for (int n=1; n<=(256*1024); n*=2)
  {
    if (world_rank==0) 
    {
        printf("starting n = %d \n", n);
        fflush(stdout);
    }

    result = barrier(world_geometry, contexts[0]);
    TEST_ASSERT(result == PAMI_SUCCESS,"barrier");

    double * sbuf = safemalloc(world_size*n*sizeof(double));
    double * rbuf = safemalloc(world_size*n*sizeof(double));

    for (int s=0; s<world_size; s++ )
      for (int k=0; k<n; k++)
        sbuf[s*n+k] = world_rank*n+k;

    for (int s=0; s<world_size; s++ )
      for (int k=0; k<n; k++)
        rbuf[s*n+k] = -1.0;

    result = barrier(world_geometry, contexts[0]);
    TEST_ASSERT(result == PAMI_SUCCESS,"barrier");

    size_t bytes = world_size * n * sizeof(double), bytes_out;

    pami_memregion_t * local_mr  = safemalloc(num_sync * sizeof(pami_memregion_t) );
    pami_memregion_t * shared_mr = safemalloc(num_sync * sizeof(pami_memregion_t) );
    for (int i=0; i<num_sync; i++)
    {
        result = PAMI_Memregion_create(contexts[i], rbuf, bytes, &bytes_out, &(local_mr[i]));
        TEST_ASSERT(result == PAMI_SUCCESS && bytes==bytes_out,"PAMI_Memregion_create");

        result = PAMI_Memregion_create(contexts[async_context_begin+i], sbuf, bytes, &bytes_out, &(shared_mr[i]));
        TEST_ASSERT(result == PAMI_SUCCESS && bytes==bytes_out,"PAMI_Memregion_create");
    }
 
    result = barrier(world_geometry, contexts[0]);
    TEST_ASSERT(result == PAMI_SUCCESS,"barrier");

    pami_endpoint_t * target_eps = (pami_endpoint_t *) safemalloc( num_async * world_size * sizeof(pami_endpoint_t) );
    for (int target=0; target<world_size; target++)
        for (int i=0; i<num_async; i++)
        {
            result = PAMI_Endpoint_create(client, (pami_task_t) target, i, &(target_eps[target*num_async+i]) );
            TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Endpoint_create");
        }

    result = barrier(world_geometry, contexts[0]);
    TEST_ASSERT(result == PAMI_SUCCESS,"barrier");

    pami_memregion_t * shmrs = (pami_memregion_t *) safemalloc( num_async * world_size * sizeof(pami_memregion_t) );

    result = allgather(world_geometry, contexts[0], num_async * sizeof(pami_memregion_t), shared_mr, shmrs);
    TEST_ASSERT(result == PAMI_SUCCESS,"allgather");

    /* check now that count will not iterate over an incomplete iteration space */
    int remote_targets_per_thread = world_size/num_sync;
    assert((world_size%num_sync)==0);

    if (world_rank==0) 
    {
        printf("starting A2A \n");
        fflush(stdout);
    }

    result = barrier(world_geometry, contexts[0]);
    TEST_ASSERT(result == PAMI_SUCCESS,"barrier");

    int active = world_size;

    uint64_t t0 = GetTimeBase();

    result = barrier(world_geometry, contexts[0]);
    TEST_ASSERT(result == PAMI_SUCCESS,"barrier");

    /* GCC prior to 4.7 will not permit const variables to be private i.e. firstprivate */
#ifdef _OPENMP
#pragma omp parallel default(shared) firstprivate(n, num_async, num_sync)
#endif
    {
#ifdef _OPENMP
        int tid = omp_get_thread_num();
#else
        int tid = 0;
#endif

        for (int count=0; count<remote_targets_per_thread; count++)
        {
          int target = remote_targets_per_thread*tid + count;
          target += world_rank;
          target  = target % world_size;
 
          //printf("%ld: attempting Rget to %ld \n", (long)world_rank, (long)target);
          //fflush(stdout);
          
          int local_context  = tid; /* each thread uses its own context so this is thread-safe */
          int remote_context = target % num_async;

          pami_rget_simple_t parameters;
          parameters.rma.dest           = target_eps[target*num_async+remote_context];
          //parameters.rma.hints          = ;
          parameters.rma.bytes          = n*sizeof(double);
          parameters.rma.cookie         = &active;
          parameters.rma.done_fn        = cb_done;
          parameters.rdma.local.mr      = &local_mr[local_context];
          parameters.rdma.local.offset  = target*n*sizeof(double);
          parameters.rdma.remote.mr     = &shmrs[target*num_async+remote_context];
          parameters.rdma.remote.offset = world_rank*n*sizeof(double);
 
          result = PAMI_Rget(contexts[local_context], &parameters);
          TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Rget");
        }
    }

    uint64_t t1 = GetTimeBase();
    double  dt1 = (t1-t0)*tic;

    while (active>0)
    {
      result = PAMI_Context_trylock_advancev(&(contexts[0]), num_sync+num_async, 1000);
      TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Context_trylock_advancev");
    }

    result = barrier(world_geometry, contexts[0]);
    TEST_ASSERT(result == PAMI_SUCCESS,"barrier");

    uint64_t t2 = GetTimeBase();
    double  dt2 = (t2-t0)*tic;

    //result = barrier(world_geometry, contexts[0]);
    //TEST_ASSERT(result == PAMI_SUCCESS,"barrier");

    double megabytes = 1.e-6*bytes;

    printf("%ld: PAMI_Rget A2A: %ld bytes per rank, local %lf seconds (%lf MB/s), remote %lf seconds (%lf MB/s) \n", 
           (long)world_rank, n*sizeof(double), 
           dt1, megabytes/dt1,
           dt2, megabytes/dt2 );
    fflush(stdout);

    result = barrier(world_geometry, contexts[0]);
    TEST_ASSERT(result == PAMI_SUCCESS,"barrier");

    for (int s=0; s<world_size; s++ )
      for (int k=0; k<n; k++)
      {
        if (rbuf[s*n+k]!=(1.0*s*n+1.0*k))
          printf("%4d: rbuf[%d] = %lf (%lf) \n", (int)world_rank, s*n+k, rbuf[s*n+k], (1.0*s*n+1.0*k) );
      }
    fflush(stdout);

    result = barrier(world_geometry, contexts[0]);
    TEST_ASSERT(result == PAMI_SUCCESS,"barrier");

    for (int i=0; i<num_async; i++)
    {
        result = PAMI_Memregion_destroy(contexts[i], &(local_mr[i]) );
        TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Memregion_destroy");

        result = PAMI_Memregion_destroy(contexts[async_context_begin+i], &(shared_mr[i]) );
        TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Memregion_destroy");
    }
 
    free(shared_mr);
    free(local_mr);
    free(target_eps);
    free(shmrs);
    free(rbuf);
    free(sbuf);
  }

  /************************************************************************/

  result = barrier(world_geometry, contexts[0]);
  TEST_ASSERT(result == PAMI_SUCCESS,"barrier");

  /* finalize the contexts */
  result = PAMI_Context_destroyv( contexts, num_contexts );
  TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Context_destroyv");

  free(contexts);

  /* finalize the client */
  result = PAMI_Client_destroy( &client );
  TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Client_destroy");

  if (world_rank==0)
    printf("%ld: end of test \n", world_rank );
  fflush(stdout);

  return 0;
}
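The barrier() helper used by Examples No. 3 and 11 is also omitted. A minimal sketch that picks the first always-works barrier algorithm on the geometry and advances the context until the collective completes:

#include <string.h>

static void cb_barrier_done (pami_context_t context, void * cookie, pami_result_t result)
{
  int * active = (int *) cookie;
  (*active)--;
}

static pami_result_t barrier (pami_geometry_t geometry, pami_context_t context)
{
  size_t num_alg[2];
  pami_result_t result = PAMI_Geometry_algorithms_num (geometry, PAMI_XFER_BARRIER, num_alg);
  if (result != PAMI_SUCCESS || num_alg[0] == 0) return PAMI_ERROR;

  /* Take the first algorithm from the "always works" list. */
  pami_algorithm_t algorithm;
  pami_metadata_t  metadata;
  result = PAMI_Geometry_algorithms_query (geometry, PAMI_XFER_BARRIER,
                                           &algorithm, &metadata, 1,
                                           NULL, NULL, 0);
  if (result != PAMI_SUCCESS) return result;

  int active = 1;
  pami_xfer_t xfer;
  memset (&xfer, 0, sizeof(xfer));
  xfer.cb_done   = cb_barrier_done;
  xfer.cookie    = &active;
  xfer.algorithm = algorithm;

  result = PAMI_Collective (context, &xfer);
  if (result != PAMI_SUCCESS) return result;

  while (active) PAMI_Context_advance (context, 1000);
  return PAMI_SUCCESS;
}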
Example No. 12
int main(int argc, char* argv[])
{
  pami_result_t result = PAMI_ERROR;

  /* initialize the client */
  pami_client_t client;
  char * clientname = "";
  result = PAMI_Client_create(clientname, &client, NULL, 0);
  TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Client_create");

  /* query properties of the client */
  pami_configuration_t config;
  size_t num_contexts;

  config.name = PAMI_CLIENT_NUM_TASKS;
  result = PAMI_Client_query( client, &config,1);
  TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Client_query");
  world_size = config.value.intval;

  config.name = PAMI_CLIENT_TASK_ID;
  result = PAMI_Client_query( client, &config,1);
  TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Client_query");
  world_rank = config.value.intval;
  printf("hello world from rank %ld of %ld \n", world_rank, world_size );
  fflush(stdout);

  config.name = PAMI_CLIENT_NUM_CONTEXTS;
  result = PAMI_Client_query( client, &config, 1);
  TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Client_query");
  num_contexts = config.value.intval;

  /* initialize the contexts */
  pami_context_t * contexts;
  contexts = (pami_context_t *) malloc( num_contexts * sizeof(pami_context_t) );
  TEST_ASSERT(contexts!=NULL,"malloc");

  result = PAMI_Context_createv( client, &config, 0, contexts, num_contexts );
  TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Context_createv");

  printf("%ld contexts were created by rank %ld \n", num_contexts, world_rank );
  fflush(stdout);

  /* setup the world geometry */
  pami_geometry_t world_geometry;
  result = PAMI_Geometry_world( client, &world_geometry );
  TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Geometry_world");

  barrier();

  /****************************************************************/

  int n = 1000;
  char * in  = (char *) malloc(n);
  char * out = (char *) malloc(n);
  memset(in,  '\0', n);
  memset(out, '\a', n);

  allgather(n, in, out);

  free(out);
  free(in);

  /****************************************************************/

  barrier();

  /* finalize the contexts */
  result = PAMI_Context_destroyv( contexts, num_contexts );
  TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Context_destroyv");

  free(contexts);

  /* finalize the client */
  result = PAMI_Client_destroy( &client );
  TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Client_destroy");

  printf("%ld: end of test \n", world_rank );
  fflush(stdout);

  return 0;
}
Example No. 13
int main(int argc, char ** argv)
{
  pami_client_t client;
  pami_context_t context;
  pami_result_t status = PAMI_ERROR;

  status = PAMI_Client_create("TEST", &client, NULL, 0);
  if(status != PAMI_SUCCESS)
  {
    fprintf (stderr, "Error. Unable to initialize pami client. result = %d\n", status);
    return 1;
  }

  status = PAMI_Context_createv(client, NULL, 0, &context, 1);
  if(status != PAMI_SUCCESS)
  {
    fprintf (stderr, "Error. Unable to create pami context. result = %d\n", status);
    return 1;
  }

  /* ------------------------------------------------------------------------ */

  pami_extension_t extension;
  status = PAMI_Extension_open (client, "EXT_collsel", &extension);
  if(status != PAMI_SUCCESS)
  {
    fprintf (stderr, "Error. The \"EXT_collsel\" extension is not implemented. result = %d\n", status);
    return 1;
  }

  //printf("before table init\n");
  pami_extension_collsel_init pamix_collsel_init = 
    (pami_extension_collsel_init) PAMI_Extension_symbol (extension, "Collsel_init_fn");
  if (pamix_collsel_init == (void *)NULL)
  {
    fprintf (stderr, "Error. The \"EXT_collsel\" extension function \"Collsel_init_fn\" is not implemented. result = %d\n", status);
    return 1;
  }
  advisor_t advisor;
  advisor_configuration_t configuration[1];
  status = pamix_collsel_init (client, configuration, 1, &context, 1, &advisor);

  pami_extension_collsel_table_generate pamix_collsel_table_generate = 
    (pami_extension_collsel_table_generate) PAMI_Extension_symbol (extension, "Collsel_table_generate_fn");
  if (pamix_collsel_table_generate == (void *)NULL)
  {
    fprintf (stderr, "Error. The \"EXT_collsel\" extension function \"Collsel_table_generate_fn\" is not implemented. result = %d\n", status);
    return 1;
  }
  advisor_params_t params;
  int i = 0;
  //do a bcast on 16 np 1000 bytes
  params.collectives = (pami_xfer_type_t *)malloc(sizeof(pami_xfer_type_t) * 32);
  params.collectives[i++] = PAMI_XFER_BROADCAST;
  params.collectives[i++] = PAMI_XFER_ALLREDUCE;
  params.collectives[i++] = PAMI_XFER_REDUCE;
  params.collectives[i++] = PAMI_XFER_ALLGATHER;
  params.collectives[i++] = PAMI_XFER_ALLGATHERV;
  params.collectives[i++] = PAMI_XFER_ALLGATHERV_INT;
  params.collectives[i++] = PAMI_XFER_SCATTER;
  params.collectives[i++] = PAMI_XFER_SCATTERV;
  params.collectives[i++] = PAMI_XFER_SCATTERV_INT;
  params.collectives[i++] = PAMI_XFER_GATHER;
  params.collectives[i++] = PAMI_XFER_GATHERV;
  params.collectives[i++] = PAMI_XFER_GATHERV_INT;
  params.collectives[i++] = PAMI_XFER_BARRIER;
  params.collectives[i++] = PAMI_XFER_ALLTOALL;
  params.collectives[i++] = PAMI_XFER_ALLTOALLV;
  params.collectives[i++] = PAMI_XFER_ALLTOALLV_INT;
  params.collectives[i++] = PAMI_XFER_SCAN;
  params.collectives[i++] = PAMI_XFER_REDUCE_SCATTER;
  params.num_collectives = i;

  params.geometry_sizes = (size_t *)malloc(sizeof(size_t));
  params.geometry_sizes[0] =512;
  params.num_geometry_sizes = 1;
  params.message_sizes = (size_t *)malloc(sizeof(size_t));
  //params.message_sizes[0] = 2000;
  params.message_sizes[0] = 10000;
  params.num_message_sizes = 1;
  params.iter = 2;
  params.verify = 0;
  params.verbose = 0;
  //params.verify = 1;
  //params.verbose = 1;
  status = pamix_collsel_table_generate (advisor, NULL, &params, 1);

  pami_extension_collsel_destroy pamix_collsel_destroy =
    (pami_extension_collsel_destroy) PAMI_Extension_symbol (extension, "Collsel_destroy_fn");
  if (pamix_collsel_destroy == (void *)NULL)
  {
    fprintf (stderr, "Error. The \"EXT_collsel\" extension function \"Collsel_destroy_fn\" is not implemented. result = %d\n", status);
    return 1;
  }
  status = pamix_collsel_destroy (advisor);
  //printf("after collsel destroy\n");

  free(params.collectives);
  free(params.geometry_sizes);
  free(params.message_sizes);

  status = PAMI_Extension_close (extension);
  if(status != PAMI_SUCCESS)
  {
    fprintf (stderr, "Error. The \"EXT_collsel\" extension could not be closed. result = %d\n", status);
    return 1;
  }
  //printf("after extension close\n");


  /* ------------------------------------------------------------------------ */
  status = PAMI_Context_destroyv(&context, 1);
  if(status != PAMI_SUCCESS)
  {
    fprintf(stderr, "Error. Unable to destroy pami context. result = %d\n", status);
    return 1;
  }
  //printf("after context destroy\n");

  status = PAMI_Client_destroy(&client);
  if(status != PAMI_SUCCESS)
  {
    fprintf(stderr, "Error. Unable to finalize pami client. result = %d\n", status);
    return 1;
  }
  //printf("after client destroy\n");

  return 0;
}
Ejemplo n.º 14
0
int main(int argc, char* argv[])
{
  int status = MPI_SUCCESS; 
  pami_result_t result = PAMI_ERROR;

  int provided = MPI_THREAD_SINGLE;
  MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &provided);
  /* IBM: --ranks-per-node 64 fails to init threads but this  */
  /* IBM: testcase doesn't really care so don't exit */
  TEST_ASSERT((provided>=MPI_THREAD_MULTIPLE),"MPI_Init_thread"); 

  /* initialize the client */
  char * clientname = "test"; /* IBM: PE PAMI requires a client name */
  pami_client_t client;
  result = PAMI_Client_create(clientname, &client, NULL, 0);
  TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Client_create");

  /* query properties of the client */
  pami_configuration_t config[3];
  size_t num_contexts;

  config[0].name = PAMI_CLIENT_NUM_TASKS;
  config[1].name = PAMI_CLIENT_TASK_ID;
  config[2].name = PAMI_CLIENT_NUM_CONTEXTS;
  result = PAMI_Client_query(client, config, 3);
  TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Client_query");
  world_size   = config[0].value.intval;
  world_rank   = config[1].value.intval;
  num_contexts = config[2].value.intval;

  if (world_rank==0)
    printf("hello world from rank %ld of %ld, number of contexts %zu \n", world_rank, world_size, num_contexts );/*IBM: debug num_contexts */
  fflush(stdout);

  /* initialize the contexts */
  pami_context_t * contexts;
  contexts = (pami_context_t *) safemalloc( num_contexts * sizeof(pami_context_t) );

  result = PAMI_Context_createv( client, NULL, 0, contexts, num_contexts );
  TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Context_createv");

  /************************************************************************/
  /* IBM: Updating the test with the assumption that we will Rput the     */
  /* IBM: local byte array to our neighbor's shared byte array.           */

  int n = (argc>1 ? atoi(argv[1]) : 1000);

  size_t bytes = n * sizeof(int), bytes_out;/* IBM: debug - scale up testing */
  int *  shared = (int *) safemalloc(bytes);
  for (int i=0; i<n; i++)
    shared[i] = -1;   /*IBM: initialize with -1, replaced with neighbor's rank */

  pami_memregion_t shared_mr;
  result = PAMI_Memregion_create(contexts[0], shared, bytes, &bytes_out, &shared_mr);
  TEST_ASSERT(result == PAMI_SUCCESS && bytes==bytes_out,"PAMI_Memregion_create");

  int *  local  = (int *) safemalloc(bytes);
  for (int i=0; i<n; i++)
    local[i] = world_rank;   /*IBM: initialize with our rank */

  pami_memregion_t local_mr;
  result = PAMI_Memregion_create(contexts[0], local, bytes, &bytes_out, &local_mr); /* IBM: local */
  TEST_ASSERT(result == PAMI_SUCCESS && bytes==bytes_out,"PAMI_Memregion_create");

  status = MPI_Barrier(MPI_COMM_WORLD);
  TEST_ASSERT(status == MPI_SUCCESS,"MPI_Barrier");

  pami_memregion_t * shmrs = (pami_memregion_t *) safemalloc( world_size * sizeof(pami_memregion_t) );

  status = MPI_Allgather(&shared_mr, sizeof(pami_memregion_t), MPI_BYTE, 
                         shmrs,      sizeof(pami_memregion_t), MPI_BYTE, MPI_COMM_WORLD);
  TEST_ASSERT(status == MPI_SUCCESS,"MPI_Allgather");

  int target = (world_rank>0 ? world_rank-1 : world_size-1);
  pami_endpoint_t target_ep;
  result = PAMI_Endpoint_create(client, (pami_task_t) target, 0, &target_ep);
  TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Endpoint_create");

  int active = 2;
  pami_rput_simple_t parameters;
  parameters.rma.dest           = target_ep;
  parameters.rma.bytes          = bytes;
  parameters.rma.cookie         = &active;
  parameters.rma.done_fn        = cb_done;
  parameters.rdma.local.mr      = &local_mr;
  parameters.rdma.local.offset  = 0;
  parameters.rdma.remote.mr     = &shmrs[target]; /*IBM: target's mem region */
  parameters.rdma.remote.offset = 0;
  parameters.put.rdone_fn       = cb_done;
  result = PAMI_Rput(contexts[0], &parameters);
  TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Rput");

  while (active)
  {
    //result = PAMI_Context_advance( contexts[0], 100);
    //TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Context_advance");
    result = PAMI_Context_trylock_advancev(contexts, 1, 1000);
    TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Context_trylock_advancev");
  }

  /* IBM: I'm done with Rput but my world_rank + 1 neighbor might not be so need to advance */
  /* IBM: Could do a barrier or send/recv a completion message instead ....*/

  active = 10; /* IBM: Arbitrary - advance some more - 10*10000 good enough?  */
  while (--active)                                                         /* IBM*/
  {                                                                        /* IBM*/
    result = PAMI_Context_trylock_advancev(contexts, 1, 10000);            /* IBM*/
  /*TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Context_trylock_advancev");*/ /* IBM*/
  }                                                                        /* IBM*/

  int errors = 0;
  
  target = (world_rank<(world_size-1) ? world_rank+1 : 0);
  for (int i=0; i<n; i++)
    if ((shared[i] != target) ||
        (local[i] != world_rank)) /*IBM: also verify didn't change local */
       errors++;

  if (errors>0)
  {
      printf("%ld: %d errors :-( \n", (long)world_rank,  errors);  /*IBM: grep "errors" in scaled up output */
      for (int i=0; i<n; i++)
        printf("%ld: local[%d] = %d , shared[%d] = %d (%d) \n", (long)world_rank, i, local[i], i, shared[i], target); /*IBM: print both arrays */
  }
  else
    printf("%ld: no errors :-) \n", (long)world_rank); 


  MPI_Barrier(MPI_COMM_WORLD);

  result = PAMI_Memregion_destroy(contexts[0], &shared_mr);
  TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Memregion_destroy");

  result = PAMI_Memregion_destroy(contexts[0], &local_mr);
  TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Memregion_destroy");

  free(shmrs);
  free(local);
  free(shared);

  /************************************************************************/

  /* finalize the contexts */
  result = PAMI_Context_destroyv( contexts, num_contexts );
  TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Context_destroyv");

  free(contexts);

  /* finalize the client */
  result = PAMI_Client_destroy( &client );
  TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Client_destroy");

  status = MPI_Barrier(MPI_COMM_WORLD);
  TEST_ASSERT(status == MPI_SUCCESS,"MPI_Barrier");

  MPI_Finalize();

  if (world_rank==0)
    printf("%ld: end of test \n", world_rank );
  fflush(stdout);

  return 0;
}
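The example above (and several that follow) relies on two helpers that are not part of this listing: a completion callback `cb_done` that decrements the counter passed in as the cookie, and a `safemalloc` wrapper. A minimal sketch, assuming the standard `pami_event_function` prototype; the actual test harness may differ:

#include <stdio.h>
#include <stdlib.h>
#include <pami.h>

/* Completion callback: the examples store the address of an int/size_t
 * counter in the cookie and spin on it while advancing the context. */
static void cb_done (pami_context_t context, void * cookie, pami_result_t result)
{
  volatile int * active = (volatile int *) cookie;
  (*active)--;
}

/* malloc wrapper that aborts on allocation failure. */
static void * safemalloc (size_t bytes)
{
  void * ptr = malloc (bytes);
  if (ptr == NULL)
  {
    fprintf (stderr, "safemalloc: failed to allocate %zu bytes\n", bytes);
    abort ();
  }
  return ptr;
}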
Ejemplo n.º 15
0
int main(int argc, char* argv[])
{
  pami_result_t result = PAMI_ERROR;

  /* initialize the client */
  char * clientname = "";
  pami_client_t client;
  result = PAMI_Client_create(clientname, &client, NULL, 0);
  TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Client_create");

  /* query properties of the client */
  pami_configuration_t config[3];
  size_t num_contexts;

  config[0].name = PAMI_CLIENT_NUM_TASKS;
  config[1].name = PAMI_CLIENT_TASK_ID;
  config[2].name = PAMI_CLIENT_NUM_CONTEXTS;
  result = PAMI_Client_query(client, config, 3);
  TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Client_query");
  world_size   = config[0].value.intval;
  world_rank   = config[1].value.intval;
  num_contexts = config[2].value.intval;
  TEST_ASSERT(num_contexts>1,"num_contexts>1");

  if (world_rank==0)
  {
    printf("hello world from rank %ld of %ld \n", world_rank, world_size );
    fflush(stdout);
  }

  /* initialize the contexts */
  contexts = (pami_context_t *) safemalloc( num_contexts * sizeof(pami_context_t) );

  result = PAMI_Context_createv( client, NULL, 0, contexts, num_contexts );
  TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Context_createv");

  /* setup the world geometry */
  pami_geometry_t world_geometry;
  result = PAMI_Geometry_world(client, &world_geometry );
  TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Geometry_world");

  int status = pthread_create(&Progress_thread, NULL, &Progress_function, NULL);
  TEST_ASSERT(status==0, "pthread_create");

  /************************************************************************/

  int n = (argc>1 ? atoi(argv[1]) : 1000);

  size_t bytes = n * sizeof(int);

  int *  local  = (int *) safemalloc(bytes);
  for (int i=0; i<n; i++)
    local[i] = world_rank;

  /* create the endpoint */
  int target = (world_rank>0 ? world_rank-1 : world_size-1);
  pami_endpoint_t target_ep;
  result = PAMI_Endpoint_create(client, (pami_task_t) target, 1, &target_ep);
  //result = PAMI_Endpoint_create(client, (pami_task_t) target, 0, &target_ep);
  TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Endpoint_create");

  /* register the dispatch function */
  pami_dispatch_callback_function dispatch_cb;
  size_t dispatch_id                 = 37;
  dispatch_cb.p2p                    = dispatch_recv_cb;
  pami_dispatch_hint_t dispatch_hint = {0};
  int dispatch_cookie                = 1000000+world_rank;
  //dispatch_hint.recv_immediate       = PAMI_HINT_DISABLE;
  result = PAMI_Dispatch_set(contexts[0], dispatch_id, dispatch_cb, &dispatch_cookie, dispatch_hint);
  result = PAMI_Dispatch_set(contexts[1], dispatch_id, dispatch_cb, &dispatch_cookie, dispatch_hint);
  TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Dispatch_set");

  /* make sure everything is ready */
  result = barrier(world_geometry, contexts[0]);
  TEST_ASSERT(result == PAMI_SUCCESS,"barrier");

  // The iovec structure describes a buffer. It contains two fields:
  // void *iov_base - Contains the address of a buffer. 
  // size_t iov_len - Contains the length of the buffer.

  int header  = 37373;

  int active = 1;
  pami_send_t parameters;
  parameters.send.header.iov_base = &header;
  parameters.send.header.iov_len  = sizeof(int);
  parameters.send.data.iov_base   = local;
  parameters.send.data.iov_len    = bytes;
  parameters.send.dispatch        = dispatch_id;
  //parameters.send.hints           = ;
  parameters.send.dest            = target_ep;
  parameters.events.cookie        = &active;
  parameters.events.local_fn      = cb_done;
  parameters.events.remote_fn     = NULL;//cb_done;

  uint64_t t0 = GetTimeBase();

  result = PAMI_Send(contexts[0], &parameters);
  TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Send");

  while (active)
  {
    //result = PAMI_Context_advance( contexts[0], 100);
    //TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Context_advance");
    result = PAMI_Context_trylock_advancev(&(contexts[0]), 1, 1000);
    TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Context_trylock_advancev");
  }

  uint64_t t1 = GetTimeBase();
  uint64_t dt = t1-t0;

  /* barrier on non-progressing context to make sure CHT does its job */
  barrier(world_geometry, contexts[0]);

  printf("%ld: PAMI_Send of %ld bytes achieves %lf MB/s \n", (long)world_rank, bytes, 1.6e9*1e-6*(double)bytes/(double)dt );
  fflush(stdout);

  result = barrier(world_geometry, contexts[0]);
  TEST_ASSERT(result == PAMI_SUCCESS,"barrier");

  free(local);

  /************************************************************************/

  void * rv;

  status = pthread_cancel(Progress_thread);
  TEST_ASSERT(status==0, "pthread_cancel");

  status = pthread_join(Progress_thread, &rv);
  TEST_ASSERT(status==0, "pthread_join");

  result = barrier(world_geometry, contexts[0]);
  TEST_ASSERT(result == PAMI_SUCCESS,"barrier");

  /* finalize the contexts */
  result = PAMI_Context_destroyv( contexts, num_contexts );
  TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Context_destroyv");

  free(contexts);

  /* finalize the client */
  result = PAMI_Client_destroy( &client );
  TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Client_destroy");

  if (world_rank==0)
    printf("%ld: end of test \n", world_rank );
  fflush(stdout);

  return 0;
}
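Ejemplo n.º 15 spawns a progress thread (`Progress_thread` running `Progress_function`) whose body is not shown. A plausible sketch, assuming it simply keeps advancing the context the main thread never touches until the thread is cancelled:

#include <pthread.h>
#include <pami.h>

extern pami_context_t * contexts;   /* allocated in main() before pthread_create */

static void * Progress_function (void * dummy)
{
  while (1)
  {
    /* advance the second context; a PAMI_EAGAIN return (lock contended)
     * is harmless here, so the return code is not checked */
    PAMI_Context_trylock_advancev (&(contexts[1]), 1, 1000);
    pthread_testcancel ();           /* allow pthread_cancel() from main() */
  }
  return NULL;
}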
Ejemplo n.º 16
0
int main (int argc, char ** argv)
{
  volatile size_t _rts_active = 1;
  volatile size_t _ack_active = 1;

  memset(&null_send_hint, 0, sizeof(null_send_hint));

  pami_client_t client;
  pami_context_t context[2];

  char                  cl_string[] = "TEST";
  pami_result_t result = PAMI_ERROR;

  result = PAMI_Client_create (cl_string, &client, NULL, 0);
  if (result != PAMI_SUCCESS)
  {
    fprintf (stderr, "Error. Unable to create pami client. result = %d\n", result);
    return 1;
  }

#ifdef TEST_CROSSTALK
  size_t num = 2;
#else
  size_t num = 1;
#endif
  result = PAMI_Context_createv(client, NULL, 0, context, num);
  if (result != PAMI_SUCCESS)
  {
    fprintf (stderr, "Error. Unable to create pami context(s). result = %d\n", result);
    return 1;
  }

  pami_configuration_t configuration;

  configuration.name = PAMI_CLIENT_TASK_ID;
  result = PAMI_Client_query(client, &configuration,1);
  if (result != PAMI_SUCCESS)
  {
    fprintf (stderr, "Error. Unable query configuration (%d). result = %d\n", configuration.name, result);
    return 1;
  }
  pami_task_t task_id = configuration.value.intval;
  fprintf (stderr, "My task id = %d\n", task_id);

  configuration.name = PAMI_CLIENT_NUM_TASKS;
  result = PAMI_Client_query(client, &configuration,1);
  if (result != PAMI_SUCCESS)
  {
    fprintf (stderr, "Error. Unable query configuration (%d). result = %d\n", configuration.name, result);
    return 1;
  }
  size_t num_tasks = configuration.value.intval;
  fprintf (stderr, "Number of tasks = %zu\n", num_tasks);
  if (num_tasks < 2)
  {
    fprintf (stderr, "Error. This test requires at least 2 tasks. Number of tasks in this job: %zu\n", num_tasks);
    return 1;
  }

  pami_dispatch_hint_t options={};

#ifdef USE_SHMEM_OPTION
  options.use_shmem = PAMI_HINT_ENABLE;
  fprintf (stderr, "##########################################\n");
  fprintf (stderr, "shared memory optimizations forced ON\n");
  fprintf (stderr, "##########################################\n");
#elif defined(NO_SHMEM_OPTION)
  options.use_shmem = PAMI_HINT_DISABLE;
  fprintf (stderr, "##########################################\n");
  fprintf (stderr, "shared memory optimizations forced OFF\n");
  fprintf (stderr, "##########################################\n");
#endif

  size_t i = 0;
#ifdef TEST_CROSSTALK
  for (i=0; i<2; i++)
#endif
  {
    pami_dispatch_callback_function fn;

    fprintf (stderr, "Before PAMI_Dispatch_set(%d) .. &_rts_active = %p, _rts_active = %zu\n", DISPATCH_ID_RTS, &_rts_active, _rts_active);
    fn.p2p = dispatch_rts;
    result = PAMI_Dispatch_set (context[i],
                                DISPATCH_ID_RTS,
                                fn,
                                (void *)&_rts_active,
                                options);
    if (result != PAMI_SUCCESS)
    {
      fprintf (stderr, "Error. Unable register pami dispatch. result = %d\n", result);
      return 1;
    }

    fprintf (stderr, "Before PAMI_Dispatch_set(%d) .. &_ack_active = %p, _ack_active = %zu\n", DISPATCH_ID_ACK, &_ack_active, _ack_active);
    fn.p2p = dispatch_ack;
    result = PAMI_Dispatch_set (context[i],
                                DISPATCH_ID_ACK,
                                fn,
                                (void *)&_ack_active,
                                options);
    if (result != PAMI_SUCCESS)
    {
      fprintf (stderr, "Error. Unable register pami dispatch. result = %d\n", result);
      return 1;
    }
  }

  if (task_id == 0)
  {
    pami_send_immediate_t parameters;
#ifdef TEST_CROSSTALK
    fprintf (stdout, "PAMI_Rget('simple') functional test [crosstalk]\n");
    fprintf (stdout, "\n");
    PAMI_Endpoint_create (client, num_tasks-1, 1, &parameters.dest);
#else
    fprintf (stdout, "PAMI_Rget('simple') functional test\n");
    fprintf (stdout, "\n");
    PAMI_Endpoint_create (client, num_tasks-1, 0, &parameters.dest);
#endif


    /* Allocate some memory from the heap. */
    void * send_buffer = malloc (BUFFERSIZE);

    /* Initialize the memory for validation. */
    initialize_data ((uint32_t *)send_buffer, BUFFERSIZE, 0);
    print_data (send_buffer, BUFFERSIZE);

    /* Send an 'rts' message to the target task and provide the memory region */
    rts_info_t rts_info;
    PAMI_Endpoint_create (client, 0, 0, &rts_info.origin);
    rts_info.bytes  = BUFFERSIZE;

    /* Create a memory region for this memory buffer */
    size_t bytes = 0;
    pami_result_t pami_rc = PAMI_Memregion_create (context[0], send_buffer, BUFFERSIZE, &bytes, &(rts_info.memregion));

    if (PAMI_SUCCESS != pami_rc) {
      fprintf (stderr, "PAMI_Memregion_create failed with rc = %d\n", pami_rc) ;
      exit(1);
    }

    parameters.dispatch        = DISPATCH_ID_RTS;
    parameters.header.iov_base = &rts_info;
    parameters.header.iov_len  = sizeof(rts_info_t);
    parameters.data.iov_base   = NULL;
    parameters.data.iov_len    = 0;
fprintf (stderr, "Before PAMI_Send_immediate()\n");
    PAMI_Send_immediate (context[0], &parameters);

    /* wait for the 'ack' */
fprintf (stderr, "Wait for 'ack', _ack_active = %zu\n", _ack_active);
    while (_ack_active != 0)
    {
      result = PAMI_Context_advance (context[0], 100);
      if (result != PAMI_SUCCESS && result != PAMI_EAGAIN)
      {
        fprintf (stderr, "Error. Unable to advance pami context. result = %d\n", result);
        return 1;
      }
    }

    /* Destroy the local memory region */
    PAMI_Memregion_destroy (context[0], &(rts_info.memregion));

    free (send_buffer);

    switch (_ack_status)
    {
      case 0:
        fprintf (stdout, "Test PASSED\n");
        break;
      case 1:
        fprintf (stdout, "Test FAILED (rget error)\n");
        break;
      case 2:
        fprintf (stdout, "Test FAILED (data error)\n");
        break;
      default:
        fprintf (stdout, "Test FAILED (unknown error)\n");
        break;
    }

  }
  else if (task_id == num_tasks-1)
  {
#ifdef TEST_CROSSTALK
      size_t contextid = 1;
#else
      size_t contextid = 0;
#endif

    /* wait for the 'rts' */
fprintf (stderr, "Wait for 'rts', _rts_active = %zu, contextid = %zu\n", _rts_active, contextid);
    while (_rts_active != 0)
    {
      result = PAMI_Context_advance (context[contextid], 100);
      if (result != PAMI_SUCCESS && result != PAMI_EAGAIN)
      {
        fprintf (stderr, "Error. Unable to advance pami context. result = %d\n", result);
        return 1;
      }
    }
  }
fprintf (stderr, "Test completed .. cleanup\n");

  result = PAMI_Context_destroyv (context, num);
  if (result != PAMI_SUCCESS)
  {
    fprintf (stderr, "Error. Unable to destroy pami context. result = %d\n", result);
    return 1;
  }

  result = PAMI_Client_destroy(&client);
  if (result != PAMI_SUCCESS)
  {
    fprintf (stderr, "Error. Unable to destroy pami client. result = %d\n", result);
    return 1;
  }

  /*fprintf (stdout, "Success (%d)\n", task_id); */

  return 0;
};
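The RTS/ACK handlers (`dispatch_rts`, `dispatch_ack`) registered above are not reproduced in this listing. As a sketch of the ack side only, assuming the ack header carries a single status word and that the handler follows the standard `pami_dispatch_p2p_function` prototype:

static unsigned _ack_status = 0;     /* 0 = pass, 1 = rget error, 2 = data error */

static void dispatch_ack (pami_context_t    context,
                          void            * cookie,        /* &_ack_active */
                          const void      * header_addr,
                          size_t            header_size,
                          const void      * pipe_addr,
                          size_t            pipe_size,
                          pami_endpoint_t   origin,
                          pami_recv_t     * recv)
{
  /* record the status reported by the target task */
  if (header_addr != NULL && header_size >= sizeof(unsigned))
    _ack_status = *((const unsigned *) header_addr);

  /* clear the flag the origin task is spinning on */
  volatile size_t * active = (volatile size_t *) cookie;
  *active = 0;
}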
Ejemplo n.º 17
0
/**
 * \brief Shut down the system
 *
 * At this time, no attempt is made to free memory being used for MPI structures.
 * \return MPI_SUCCESS
*/
int MPID_Finalize()
{
  pami_result_t rc;
  int mpierrno = MPI_SUCCESS;
  mpir_errflag_t errflag=MPIR_ERR_NONE;
  MPIR_Barrier_impl(MPIR_Process.comm_world, &errflag);

#ifdef MPIDI_STATISTICS
  if (MPIDI_Process.mp_statistics) {
      MPIDI_print_statistics();
  }
  MPIDI_close_pe_extension();
#endif

#ifdef DYNAMIC_TASKING
  mpidi_finalized = 1;
  if(mpidi_dynamic_tasking) {
    /* Tell the process group code that we're done with the process groups.
       This will notify PMI (with PMI_Finalize) if necessary.  It
       also frees all PG structures, including the PG for COMM_WORLD, whose
       pointer is also saved in MPIDI_Process.my_pg */
    mpierrno = MPIDI_PG_Finalize();
    if (mpierrno) {
	TRACE_ERR("MPIDI_PG_Finalize returned with mpierrno=%d\n", mpierrno);
    }

    MPIDI_FreeParentPort();
  }
  if(_conn_info_list) 
    MPIU_Free(_conn_info_list);
  MPIDI_free_all_tranid_node();
#endif


  /* ------------------------- */
  /* shutdown request queues   */
  /* ------------------------- */
  MPIDI_Recvq_finalize();

  PAMIX_Finalize(MPIDI_Client);

#ifdef MPID_NEEDS_ICOMM_WORLD
    MPIR_Comm_release_always(MPIR_Process.icomm_world, 0);
#endif

  MPIR_Comm_release_always(MPIR_Process.comm_self,0);
  MPIR_Comm_release_always(MPIR_Process.comm_world,0);

  rc = PAMI_Context_destroyv(MPIDI_Context, MPIDI_Process.avail_contexts);
  MPID_assert_always(rc == PAMI_SUCCESS);

  rc = PAMI_Client_destroy(&MPIDI_Client);
  MPID_assert_always(rc == PAMI_SUCCESS);

#ifdef MPIDI_TRACE
 {  int i;
  for (i=0; i< MPIDI_Process.numTasks; i++) {
      if (MPIDI_Trace_buf[i].R)
          MPIU_Free(MPIDI_Trace_buf[i].R);
      if (MPIDI_Trace_buf[i].PR)
          MPIU_Free(MPIDI_Trace_buf[i].PR);
      if (MPIDI_Trace_buf[i].S)
          MPIU_Free(MPIDI_Trace_buf[i].S);
  }
 }
 MPIU_Free(MPIDI_Trace_buf);
#endif

#ifdef OUT_OF_ORDER_HANDLING
  MPIU_Free(MPIDI_In_cntr);
  MPIU_Free(MPIDI_Out_cntr);
#endif

 if (TOKEN_FLOW_CONTROL_ON)
   {
     #if TOKEN_FLOW_CONTROL
     extern char *EagerLimit;

     if (EagerLimit) MPIU_Free(EagerLimit);
     MPIU_Free(MPIDI_Token_cntr);
     MPIDI_close_mm();
     #else
     MPID_assert_always(0);
     #endif
   }

  return MPI_SUCCESS;
}
Ejemplo n.º 18
0
int main (int argc, char ** argv)
{
  pami_client_t client;
  pami_context_t context;
  pami_configuration_t * configuration = NULL;
  char                  cl_string[] = "TEST";
  pami_result_t result = PAMI_ERROR;

  result = PAMI_Client_create (cl_string, &client, NULL, 0);

  if (result != PAMI_SUCCESS)
    {
      fprintf (stderr, "Error. Unable to initialize pami client. result = %d\n", result);
      return 1;
    }
    
  pami_task_t task = PAMIX_Client_task (client);
  size_t      size = PAMIX_Client_size (client);

  result = PAMI_Context_createv (client, configuration, 0, &context, 1);

  if (result != PAMI_SUCCESS)
    {
      fprintf (stderr, "Error. Unable to create pami context. result = %d\n", result);
      return 1;
    }

  /* Attempt to send using a dispatch id that has not been registered. */
  char metadata[1024];
  char buffer[1024];

  volatile unsigned recv_active = 1;
  volatile unsigned send_active = 1;
  
  pami_send_t parameters;
  parameters.send.dispatch        = 10;
  parameters.send.header.iov_base = metadata;
  parameters.send.header.iov_len  = 8;
  parameters.send.data.iov_base   = buffer;
  parameters.send.data.iov_len    = 8;
  parameters.events.cookie        = (void *) & send_active;
  parameters.events.local_fn      = decrement;
  parameters.events.remote_fn     = NULL;

  PAMI_Endpoint_create (client, size-1, 0, &parameters.send.dest);

  if (task == 0)
  {
    result = PAMI_Send (context, &parameters);
    if (result == PAMI_SUCCESS)
    {
      fprintf (stderr, "Test failure. Expected error when using an unregistered dispatch id.\n");
      return 1;
    }
  }

  size_t dispatch = 10;
  pami_dispatch_callback_function fn;
  fn.p2p = test_dispatch;
  pami_dispatch_hint_t options = {};
  result = PAMI_Dispatch_set (context, dispatch, fn, (void *) & recv_active, options);

  if (result != PAMI_SUCCESS)
    {
      fprintf (stderr, "Error. Unable register pami dispatch. result = %d\n", result);
      return 1;
    }

  if (task == 0)
  {
    PAMI_Endpoint_create (client, size-1, 0, &parameters.send.dest);
    
    result = PAMI_Send (context, &parameters);
    if (result != PAMI_SUCCESS)
    {
      fprintf (stderr, "Error. Unable to send using a registered dispatch id.\n");
      return 1;
    }
    
    while (send_active)
    {
      result = PAMI_Context_advance (context, 1000);

      if ( (result != PAMI_SUCCESS) && (result != PAMI_EAGAIN) )
        {
          fprintf (stderr, "Error. Unable to advance pami context. result = %d\n", result);
          return 1;
        }
    }
    
    while (recv_active)
    {
      result = PAMI_Context_advance (context, 1000);

      if ( (result != PAMI_SUCCESS) && (result != PAMI_EAGAIN) )
        {
          fprintf (stderr, "Error. Unable to advance pami context. result = %d\n", result);
          return 1;
        }
    }
  }
  else if (task == (size-1))
  {
    PAMI_Endpoint_create (client, 0, 0, &parameters.send.dest);
  
    while (recv_active)
    {
      result = PAMI_Context_advance (context, 1000);

      if ( (result != PAMI_SUCCESS) && (result != PAMI_EAGAIN) )
        {
          fprintf (stderr, "Error. Unable to advance pami context. result = %d\n", result);
          return 1;
        }
    }
    
    result = PAMI_Send (context, &parameters);
    if (result != PAMI_SUCCESS)
    {
      fprintf (stderr, "Error. Unable to send using a registered dispatch id.\n");
      return 1;
    }
    
    while (send_active)
    {
      result = PAMI_Context_advance (context, 1000);

      if ( (result != PAMI_SUCCESS) && (result != PAMI_EAGAIN) )
        {
          fprintf (stderr, "Error. Unable to advance pami context. result = %d\n", result);
          return 1;
        }
    }    
  }
  
  result = PAMI_Context_destroyv(&context, 1);

  if (result != PAMI_SUCCESS)
    {
      fprintf (stderr, "Error. Unable to destroy pami context. result = %d\n", result);
      return 1;
    }

  result = PAMI_Client_destroy(&client);

  if (result != PAMI_SUCCESS)
    {
      fprintf (stderr, "Error. Unable to finalize pami client. result = %d\n", result);
      return 1;
    }

  return 0;
};
Ejemplo n.º 19
0
int main(int argc, char* argv[])
{
    pami_result_t result = PAMI_ERROR;

    /* initialize the client */
    char * clientname = "";
    pami_client_t client;
    result = PAMI_Client_create(clientname, &client, NULL, 0);
    TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Client_create");

    /* query properties of the client */
    pami_configuration_t config[3];
    size_t num_contexts;

    config[0].name = PAMI_CLIENT_NUM_TASKS;
    config[1].name = PAMI_CLIENT_TASK_ID;
    config[2].name = PAMI_CLIENT_NUM_CONTEXTS;
    result = PAMI_Client_query(client, config, 3);
    TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Client_query");
    world_size   = config[0].value.intval;
    world_rank   = config[1].value.intval;
    num_contexts = config[2].value.intval;
    TEST_ASSERT(num_contexts>1,"num_contexts>1");

    if (world_rank==0)
    {
        printf("hello world from rank %ld of %ld \n", world_rank, world_size );
        fflush(stdout);
    }

    /* initialize the contexts */
    contexts = (pami_context_t *) safemalloc( num_contexts * sizeof(pami_context_t) );

    result = PAMI_Context_createv( client, NULL, 0, contexts, num_contexts );
    TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Context_createv");

    /* setup the world geometry */
    pami_geometry_t world_geometry;
    result = PAMI_Geometry_world(client, &world_geometry );
    TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Geometry_world");

    int status = pthread_create(&Progress_thread, NULL, &Progress_function, NULL);
    TEST_ASSERT(status==0, "pthread_create");

    /************************************************************************/

    int n = (argc>1 ? atoi(argv[1]) : 1000000);

    size_t bytes = n * sizeof(int);
    int *  shared = (int *) safemalloc(bytes);
    for (int i=0; i<n; i++)
        shared[i] = world_rank;

    int *  local  = (int *) safemalloc(bytes);
    for (int i=0; i<n; i++)
        local[i] = -1;

    result = barrier(world_geometry, contexts[0]);
    TEST_ASSERT(result == PAMI_SUCCESS,"barrier");

    int ** shptrs = (int **) safemalloc( world_size * sizeof(int *) );

    result = allgather(world_geometry, contexts[0], sizeof(int*), &shared, shptrs);
    TEST_ASSERT(result == PAMI_SUCCESS,"allgather");

    int target = (world_rank>0 ? world_rank-1 : world_size-1);
    pami_endpoint_t target_ep;
    result = PAMI_Endpoint_create(client, (pami_task_t) target, 1, &target_ep);
    TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Endpoint_create");

    result = barrier(world_geometry, contexts[0]);
    TEST_ASSERT(result == PAMI_SUCCESS,"barrier");

    int active = 1;
    pami_get_simple_t parameters;
    parameters.rma.dest     = target_ep;
    //parameters.rma.hints    = ;
    parameters.rma.bytes    = bytes;
    parameters.rma.cookie   = &active;
    parameters.rma.done_fn  = cb_done;
    parameters.addr.local   = local;
    parameters.addr.remote  = shptrs[target];

    uint64_t t0 = GetTimeBase();

    result = PAMI_Get(contexts[0], &parameters);
    TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Rget");

    while (active)
    {
        //result = PAMI_Context_advance( contexts[0], 100);
        //TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Context_advance");
        result = PAMI_Context_trylock_advancev(&(contexts[0]), 1, 1000);
        TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Context_trylock_advancev");
    }

    uint64_t t1 = GetTimeBase();
    uint64_t dt = t1-t0;

    /* barrier on non-progressing context to make sure CHT does its job */
    barrier(world_geometry, contexts[0]);

    printf("%ld: PAMI_Get of %ld bytes achieves %lf MB/s \n", (long)world_rank, bytes, 1.6e9*1e-6*(double)bytes/(double)dt );
    fflush(stdout);

    int errors = 0;

    //target = (world_rank<(world_size-1) ? world_rank+1 : 0);
    target = (world_rank>0 ? world_rank-1 : world_size-1);
    for (int i=0; i<n; i++)
        if (local[i] != target)
            errors++;

    if (errors>0)
    {
        for (int i=0; i<n; i++)
            if (local[i] != target)
                printf("%ld: local[%d] = %d (%d) \n", (long)world_rank, i, local[i], target);
    }
    else
        printf("%ld: no errors :-) \n", (long)world_rank);

    fflush(stdout);

    result = barrier(world_geometry, contexts[0]);
    TEST_ASSERT(result == PAMI_SUCCESS,"barrier");

    free(shptrs);
    free(local);
    free(shared);

    /************************************************************************/

    void * rv;

    status = pthread_cancel(Progress_thread);
    TEST_ASSERT(status==0, "pthread_cancel");

    status = pthread_join(Progress_thread, &rv);
    TEST_ASSERT(status==0, "pthread_join");

    result = barrier(world_geometry, contexts[0]);
    TEST_ASSERT(result == PAMI_SUCCESS,"barrier");

    /* finalize the contexts */
    result = PAMI_Context_destroyv( contexts, num_contexts );
    TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Context_destroyv");

    free(contexts);

    /* finalize the client */
    result = PAMI_Client_destroy( &client );
    TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Client_destroy");

    if (world_rank==0)
        printf("%ld: end of test \n", world_rank );
    fflush(stdout);

    return 0;
}
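Ejemplos n.º 15 and 19 call a `barrier()` helper (and n.º 19 an `allgather()`) that is not reproduced here. A minimal `barrier()` sketch, assuming it follows the same query/post/advance pattern that Ejemplos n.º 20 and 21 spell out inline, and reusing the `cb_done` decrement callback sketched earlier:

static pami_result_t barrier (pami_geometry_t geometry, pami_context_t context)
{
  pami_result_t result;
  size_t num_alg[2];

  /* how many "always works" and "must query" barrier algorithms exist */
  result = PAMI_Geometry_algorithms_num (geometry, PAMI_XFER_BARRIER, num_alg);
  if (result != PAMI_SUCCESS || num_alg[0] == 0)
    return PAMI_ERROR;

  /* retrieve only the first "always works" algorithm */
  pami_algorithm_t algorithm;
  pami_metadata_t  metadata;
  result = PAMI_Geometry_algorithms_query (geometry, PAMI_XFER_BARRIER,
                                           &algorithm, &metadata, 1,
                                           NULL, NULL, 0);
  if (result != PAMI_SUCCESS)
    return result;

  volatile int active = 1;
  pami_xfer_t xfer;
  xfer.cb_done   = cb_done;          /* decrements *(int*)cookie */
  xfer.cookie    = (void *) &active;
  xfer.algorithm = algorithm;

  result = PAMI_Collective (context, &xfer);
  if (result != PAMI_SUCCESS)
    return result;

  /* advance until the done callback fires */
  while (active)
    PAMI_Context_advance (context, 1000);

  return PAMI_SUCCESS;
}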
Ejemplo n.º 20
0
int main(int argc, char ** argv)
{
    pami_client_t         client;
    pami_context_t        context;
    pami_result_t         status = PAMI_ERROR;
    pami_configuration_t  pami_config;
    pami_geometry_t       world_geo;
    size_t                barrier_alg_num[2];
    pami_algorithm_t*     bar_always_works_algo = NULL;
    pami_metadata_t*      bar_always_works_md = NULL;
    pami_algorithm_t*     bar_must_query_algo = NULL;
    pami_metadata_t*      bar_must_query_md   = NULL;
    pami_xfer_t           barrier;
    int                   my_id;
    volatile int          is_fence_done   = 0;
    volatile int          is_barrier_done = 0;

    /* create PAMI client */
    RC( PAMI_Client_create("TEST", &client, NULL, 0) );
    DBG_FPRINTF((stderr,"Client created successfully at 0x%p\n",client));

    /* create PAMI context */
    RC( PAMI_Context_createv(client, NULL, 0, &context, 1) );
    DBG_FPRINTF((stderr,"Context created successfully at 0x%p\n",context));

    /* query my task id */
    bzero(&pami_config, sizeof(pami_configuration_t));
    pami_config.name = PAMI_CLIENT_TASK_ID;
    RC( PAMI_Client_query(client, &pami_config, 1) );
    my_id = pami_config.value.intval;
    DBG_FPRINTF((stderr,"My task id is %d\n", my_id));

    /* get the world geometry */
    RC( PAMI_Geometry_world(client, &world_geo) );
    DBG_FPRINTF((stderr,"World geometry is at 0x%p\n",world_geo));

    /* query number of barrier algorithms */
    RC( PAMI_Geometry_algorithms_num(world_geo, PAMI_XFER_BARRIER, 
                barrier_alg_num) );
    DBG_FPRINTF((stderr,"%d-%d algorithms are available for barrier op\n",
                barrier_alg_num[0], barrier_alg_num[1]));
    if (barrier_alg_num[0] <= 0) {
        fprintf (stderr, "Error. No (%lu) algorithm is available for barrier op\n",
                barrier_alg_num[0]);
        return 1;
    }

    /* query barrier algorithm list */
    bar_always_works_algo =
        (pami_algorithm_t*)malloc(sizeof(pami_algorithm_t)*barrier_alg_num[0]);
    bar_always_works_md =
        (pami_metadata_t*)malloc(sizeof(pami_metadata_t)*barrier_alg_num[0]);
    bar_must_query_algo =
        (pami_algorithm_t*)malloc(sizeof(pami_algorithm_t)*barrier_alg_num[1]);
    bar_must_query_md =
        (pami_metadata_t*)malloc(sizeof(pami_metadata_t)*barrier_alg_num[1]);

    RC( PAMI_Geometry_algorithms_query(world_geo, PAMI_XFER_BARRIER,
                bar_always_works_algo, bar_always_works_md, barrier_alg_num[0],
                bar_must_query_algo, bar_must_query_md, barrier_alg_num[1]) );
    DBG_FPRINTF((stderr,"Algorithm [%s] at 0x%p will be used for barrier op\n",
                bar_always_works_md[0].name, bar_always_works_algo[0]));

    /* begin PAMI fence */
    RC( PAMI_Fence_begin(context) );
    DBG_FPRINTF((stderr,"PAMI fence begins\n"));

    /* ------------------------------------------------------------------------ */

    pami_extension_t          extension;
    const char                ext_name[] = "EXT_hfi_extension";
    const char                sym_name[] = "hfi_remote_update";
    hfi_remote_update_fn      remote_update = NULL;
    hfi_remote_update_info_t  remote_info;
    pami_memregion_t          mem_region;
    size_t                    mem_region_sz = 0;
    unsigned long long        operand = 1234;
    unsigned long long        orig_val = 0;
    int                       offset = (operand)%MAX_TABLE_SZ;

    /* initialize table for remote update operation */
    int i;
    for (i = 0; i < MAX_TABLE_SZ; i ++) {
        table[i] = (unsigned long long) i;
    }
    orig_val = table[offset];

    /* open PAMI extension */
    RC( PAMI_Extension_open (client, ext_name, &extension) );
    DBG_FPRINTF((stderr,"Open %s successfully.\n", ext_name));

    /* load PAMI extension function */
    remote_update = (hfi_remote_update_fn) 
        PAMI_Extension_symbol (extension, sym_name);
    if (remote_update == (void *)NULL)
    {
        fprintf (stderr, "Error. Failed to load %s function in %s\n",
                 sym_name, ext_name); 
        return 1;
    } else {
        DBG_FPRINTF((stderr,"Loaded function %s in %s successfully.\n", 
                    sym_name, ext_name));
    }

    /* create a memory region for remote update operation */
    RC( PAMI_Memregion_create(context, table, 
                MAX_TABLE_SZ*sizeof(unsigned long long),
                &mem_region_sz, &mem_region) );
    DBG_FPRINTF((stderr,"%d-byte PAMI memory region created successfully.\n",
                mem_region_sz));

    /* perform a PAMI barrier */
    is_barrier_done = 0;
    barrier.cb_done = barrier_done;
    barrier.cookie = (void*)&is_barrier_done;
    barrier.algorithm = bar_always_works_algo[0];
    RC( PAMI_Collective(context, &barrier) );
    DBG_FPRINTF((stderr,"PAMI barrier op invoked successfully.\n"));
    while (is_barrier_done == 0)
        PAMI_Context_advance(context, 1000);
    DBG_FPRINTF((stderr,"PAMI barrier op finished successfully.\n"));

    RC( PAMI_Context_lock(context) );

    /* prepare remote update info */
    remote_info.dest = my_id^1;
    remote_info.op = 0;           /* op_add */
    remote_info.atomic_operand = operand;
    remote_info.dest_buf = (unsigned long long)(&(table[offset]));

    /* invoke remote update PAMI extension function */
    RC( remote_update(context, 1, &remote_info) );
    DBG_FPRINTF((stderr,"Function %s invoked successfully.\n", 
                sym_name));

    RC( PAMI_Context_unlock(context) );

    /* perform a PAMI fence */
    is_fence_done = 0;
    RC( PAMI_Fence_all(context, fence_done, (void*)&is_fence_done) );
    DBG_FPRINTF((stderr,"PAMI_Fence_all invoked successfully.\n")); 
    while (is_fence_done == 0)
        PAMI_Context_advance(context, 1000);
    DBG_FPRINTF((stderr,"PAMI_Fence_all finished successfully.\n")); 

    /* perform a PAMI barrier */
    is_barrier_done = 0;
    barrier.cb_done = barrier_done;
    barrier.cookie = (void*)&is_barrier_done;
    barrier.algorithm = bar_always_works_algo[0];
    RC( PAMI_Collective(context, &barrier) );
    DBG_FPRINTF((stderr,"PAMI barrier op invoked successfully.\n"));
    while (is_barrier_done == 0)
        PAMI_Context_advance(context, 1000);
    DBG_FPRINTF((stderr,"PAMI barrier op finished successfully.\n"));

    /* verify data after remote update operation */
    if (table[offset] != orig_val + operand) {
        printf("Data verification at offset %d with operand %llu failed: "
                "[%llu expected with %llu updated]\n",
                offset, operand, orig_val+operand, table[offset]);
    } else {
        printf("Data verification at offset %d with operand %llu passed: "
                "[%llu expected with %llu updated].\n",
                offset, operand, orig_val+operand, table[offset]);
    }

    /* destroy the memory region after remote update operation */
    RC( PAMI_Memregion_destroy(context, &mem_region) );
    DBG_FPRINTF((stderr,"PAMI memory region removed successfully.\n"));

    /* close PAMI extension */
    RC( PAMI_Extension_close (extension) );
    DBG_FPRINTF((stderr,"Close %s successfully.\n", ext_name));

    /* ------------------------------------------------------------------------ */

    /* end PAMI fence */
    RC( PAMI_Fence_end(context) );
    DBG_FPRINTF((stderr,"PAMI fence ends\n"));

    /* destroy PAMI context */
    RC( PAMI_Context_destroyv(&context, 1) );
    DBG_FPRINTF((stderr, "PAMI context destroyed successfully\n"));

    /* destroy PAMI client */
    RC( PAMI_Client_destroy(&client) );
    DBG_FPRINTF((stderr, "PAMI client destroyed successfully\n"));

    return 0;
}
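Ejemplo n.º 20 wraps every PAMI call in an `RC()` macro and uses `barrier_done`/`fence_done` callbacks, none of which appear in the listing. One plausible set of definitions, assuming `RC()` simply bails out of `main()` on any non-success return code:

#include <stdio.h>
#include <pami.h>

/* abort main() on any non-PAMI_SUCCESS return code (only usable inside main) */
#define RC(stmt)                                                    \
  do {                                                              \
    pami_result_t rc_ = (stmt);                                     \
    if (rc_ != PAMI_SUCCESS) {                                      \
      fprintf (stderr, "%s failed with rc = %d\n", #stmt, rc_);     \
      return 1;                                                     \
    }                                                               \
  } while (0)

/* completion callbacks: set the flag whose address was passed as the cookie */
static void barrier_done (pami_context_t context, void * cookie, pami_result_t result)
{
  *((volatile int *) cookie) = 1;    /* is_barrier_done */
}

static void fence_done (pami_context_t context, void * cookie, pami_result_t result)
{
  *((volatile int *) cookie) = 1;    /* is_fence_done */
}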
Ejemplo n.º 21
0
int main(int argc, char* argv[])
{
  pami_result_t result = PAMI_ERROR;

  if (Kernel_GetRank()==0)
    print_meminfo(stdout, "before PAMI_Client_create");

  /* initialize the client */
  char * clientname = "";
  pami_client_t client;
  result = PAMI_Client_create( clientname, &client, NULL, 0 );
  TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Client_create");

  if (Kernel_GetRank()==0)
    print_meminfo(stdout, "after PAMI_Client_create");

  /* query properties of the client */
  pami_configuration_t config;
  size_t num_contexts;

  config.name = PAMI_CLIENT_TASK_ID;
  result = PAMI_Client_query( client, &config, 1);
  TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Client_query");
  world_rank = config.value.intval;

  config.name = PAMI_CLIENT_NUM_TASKS;
  result = PAMI_Client_query( client, &config, 1);
  TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Client_query");
  world_size = config.value.intval;

  if ( world_rank == 0 ) 
  {
    printf("starting test on %ld ranks \n", world_size);
    fflush(stdout);
  }

  config.name = PAMI_CLIENT_PROCESSOR_NAME;
  result = PAMI_Client_query( client, &config, 1);
  assert(result == PAMI_SUCCESS);
  //printf("rank %ld is processor %s \n", world_rank, config.value.chararray);
  //fflush(stdout);

  config.name = PAMI_CLIENT_NUM_CONTEXTS;
  result = PAMI_Client_query( client, &config, 1);
  TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Client_query");
  num_contexts = config.value.intval;

  /* initialize the contexts */
  pami_context_t * contexts = NULL;
  contexts = (pami_context_t *) malloc( num_contexts * sizeof(pami_context_t) ); assert(contexts!=NULL);

  if (Kernel_GetRank()==0)
    fprintf(stdout, "num_contexts = %ld \n", (long)num_contexts);

  result = PAMI_Context_createv( client, &config, 0, contexts, num_contexts );
  TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Context_createv");

  if (Kernel_GetRank()==0)
    print_meminfo(stdout, "after PAMI_Context_createv");

  /* setup the world geometry */
  pami_geometry_t world_geometry;
  pami_xfer_type_t barrier_xfer = PAMI_XFER_BARRIER;
  size_t num_alg[2];
  pami_algorithm_t * safe_barrier_algs = NULL;
  pami_metadata_t  * safe_barrier_meta = NULL;
  pami_algorithm_t * fast_barrier_algs = NULL;
  pami_metadata_t  * fast_barrier_meta = NULL;

  result = PAMI_Geometry_world( client, &world_geometry );
  TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Geometry_world");

  if (Kernel_GetRank()==0)
    print_meminfo(stdout, "after PAMI_Geometry_world");

  result = PAMI_Geometry_algorithms_num( world_geometry, barrier_xfer, num_alg );
  TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Geometry_algorithms_num");
  if ( world_rank == 0 ) printf("number of barrier algorithms = {%ld,%ld} \n", num_alg[0], num_alg[1] );

  if (Kernel_GetRank()==0)
    print_meminfo(stdout, "after PAMI_Geometry_algorithms_num");

  safe_barrier_algs = (pami_algorithm_t *) malloc( num_alg[0] * sizeof(pami_algorithm_t) ); assert(safe_barrier_algs!=NULL);
  safe_barrier_meta = (pami_metadata_t  *) malloc( num_alg[0] * sizeof(pami_metadata_t)  ); assert(safe_barrier_meta!=NULL);
  fast_barrier_algs = (pami_algorithm_t *) malloc( num_alg[1] * sizeof(pami_algorithm_t) ); assert(fast_barrier_algs!=NULL);
  fast_barrier_meta = (pami_metadata_t  *) malloc( num_alg[1] * sizeof(pami_metadata_t)  ); assert(fast_barrier_meta!=NULL);
  result = PAMI_Geometry_algorithms_query( world_geometry, barrier_xfer,
                                           safe_barrier_algs, safe_barrier_meta, num_alg[0],
                                           fast_barrier_algs, fast_barrier_meta, num_alg[1] );
  TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Geometry_algorithms_query");

  if (Kernel_GetRank()==0)
    print_meminfo(stdout, "after PAMI_Geometry_algorithms_query");

  /* perform a barrier */
  size_t b;
  pami_xfer_t barrier;
  volatile int active = 0;

  for ( b = 0 ; b < num_alg[0] ; b++ )
  {
      barrier.cb_done   = cb_done;
      barrier.cookie    = (void*) &active;
      barrier.algorithm = safe_barrier_algs[b];

      uint64_t t0 = GetTimeBase();
      active = 1;
      result = PAMI_Collective( contexts[0], &barrier );
      TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Collective - barrier");
      while (active)
        result = PAMI_Context_advance( contexts[0], 1 );
      TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Context_advance - barrier");
      uint64_t t1 = GetTimeBase();

      if ( world_rank == 0 ) printf("safe barrier algorithm %ld (%s) - took %llu cycles \n", b, safe_barrier_meta[b].name, (long long unsigned int)t1-t0 );
      fflush(stdout);
  }
  for ( b = 0 ; b < num_alg[1] ; b++ )
  {
      barrier.cb_done   = cb_done;
      barrier.cookie    = (void*) &active;
      barrier.algorithm = fast_barrier_algs[b];

      uint64_t t0 = GetTimeBase();
      active = 1;
      result = PAMI_Collective( contexts[0], &barrier );
      TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Collective - barrier");
      while (active)
        result = PAMI_Context_advance( contexts[0], 1 );
      TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Context_advance - barrier");
      uint64_t t1 = GetTimeBase();

      if ( world_rank == 0 ) printf("fast barrier algorithm %ld (%s) - took %llu cycles \n", b, fast_barrier_meta[b].name, (long long unsigned int)t1-t0 );
      fflush(stdout);
  }

  if (Kernel_GetRank()==0)
    print_meminfo(stdout, "after barrier tests");

  /* finalize the contexts */
  result = PAMI_Context_destroyv( contexts, num_contexts );
  TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Context_destroyv");

  free(contexts);

  if (Kernel_GetRank()==0)
    print_meminfo(stdout, "before PAMI_Client_destroy");

  /* finalize the client */
  result = PAMI_Client_destroy( &client );
  TEST_ASSERT(result == PAMI_SUCCESS,"PAMI_Client_destroy");

  if (Kernel_GetRank()==0)
    print_meminfo(stdout, "after PAMI_Client_destroy");

  if ( world_rank == 0 ) 
  {
    printf("end of test \n");
    fflush(stdout);
  }

  return 0;
}
Ejemplo n.º 22
0
int main (int argc, char ** argv)
{
  pami_client_t client;
  pami_context_t context[2];
  pami_task_t task_id;
  size_t num_tasks = 0;
  size_t ncontexts = 0;
  size_t errors = 0;

  pami_result_t result = PAMI_ERROR;



  pami_type_t subtype;
  pami_type_t compound_type;
  pami_type_t simple_type;

  info_t exchange[MAX_TASKS];
  double data[BUFFERSIZE];
  volatile unsigned ready;

  { /* init */


    ready = 0;

    unsigned i;

    for (i = 0; i < MAX_TASKS; i++)
      exchange[i].active = 0;

    for (i = 0; i < BUFFERSIZE; i++)
      data[i] = E;


    result = PAMI_Client_create ("TEST", &client, NULL, 0);

    if (result != PAMI_SUCCESS)
      {
        fprintf (stderr, "Error. Unable to create pami client. result = %d\n", result);
        return 1;
      }

    pami_configuration_t configuration;

    configuration.name = PAMI_CLIENT_TASK_ID;
    result = PAMI_Client_query(client, &configuration, 1);

    if (result != PAMI_SUCCESS)
      {
        fprintf (stderr, "Error. Unable query configuration (%d). result = %d\n", configuration.name, result);
        return 1;
      }

    task_id = configuration.value.intval;
    /*fprintf (stderr, "My task id = %d\n", task_id);*/

    configuration.name = PAMI_CLIENT_NUM_TASKS;
    result = PAMI_Client_query(client, &configuration, 1);

    if (result != PAMI_SUCCESS)
      {
        fprintf (stderr, "Error. Unable query configuration (%d). result = %d\n", configuration.name, result);
        return 1;
      }

    num_tasks = configuration.value.intval;

    /*if (task_id == 0)
      fprintf (stderr, "Number of tasks = %zu\n", num_tasks);*/

    if ((num_tasks < 2) || (num_tasks > MAX_TASKS))
      {
        fprintf (stderr, "Error. This test requires 2-%d tasks. Number of tasks in this job: %zu\n", MAX_TASKS, num_tasks);
        return 1;
      }

    if (task_id == num_tasks - 1)
      {
        for (i = 0; i < 320; i++)
          data[i] = PI;
      }

    configuration.name = PAMI_CLIENT_NUM_CONTEXTS;
    result = PAMI_Client_query(client, &configuration, 1);

    if (result != PAMI_SUCCESS)
      {
        fprintf (stderr, "Error. Unable query configuration (%d). result = %d\n", configuration.name, result);
        return 1;
      }

    ncontexts = (configuration.value.intval < 2) ? 1 : 2;

    /*if (task_id == 0)
      fprintf (stderr, "maximum contexts = %zu, number of contexts used in this test = %zu\n", configuration.value.intval, ncontexts);*/

    result = PAMI_Context_createv(client, NULL, 0, context, ncontexts);

    if (result != PAMI_SUCCESS)
      {
        fprintf (stderr, "Error. Unable to create pami context(s). result = %d\n", result);
        return 1;
      }


    pami_dispatch_hint_t options = {};

    for (i = 0; i < ncontexts; i++)
      {
        pami_dispatch_callback_function fn;

        fn.p2p = dispatch_exchange;
        result = PAMI_Dispatch_set (context[i],
                                    EXCHANGE_DISPATCH_ID,
                                    fn,
                                    (void *) exchange,
                                    options);

        if (result != PAMI_SUCCESS)
          {
            fprintf (stderr, "Error. Unable register pami 'exchange' dispatch. result = %d\n", result);
            return 1;
          }

        fn.p2p = dispatch_notify;
        result = PAMI_Dispatch_set (context[i],
                                    NOTIFY_DISPATCH_ID,
                                    fn,
                                    (void *) & ready,
                                    options);

        if (result != PAMI_SUCCESS)
          {
            fprintf (stderr, "Error. Unable register pami 'notify' dispatch. result = %d\n", result);
            return 1;
          }
      }


    /* ***********************************
     * Create the pami types
     ************************************/

    /* This compound noncontiguous type is composed of one double, skips a double,
     * two doubles, skips a double, three doubles, skips a double, five doubles,
     * skips a double, six doubles, skips a double, seven doubles, skips a double,
     * eight doubles, then skips two doubles.
     *
     * This results in a type with 32 doubles that is 40 doubles
     * 'wide'.
     */
    PAMI_Type_create (&subtype);
    PAMI_Type_add_simple (subtype,
                          sizeof(double),      /* bytes */
                          0,                   /* offset */
                          1,                   /* count */
                          sizeof(double) * 2); /* stride */
    PAMI_Type_add_simple (subtype,
                          sizeof(double) * 2,  /* bytes */
                          0,                   /* offset */
                          1,                   /* count */
                          sizeof(double) * 3); /* stride */
    PAMI_Type_add_simple (subtype,
                          sizeof(double) * 3,  /* bytes */
                          0,                   /* offset */
                          1,                   /* count */
                          sizeof(double) * 4); /* stride */
    PAMI_Type_add_simple (subtype,
                          sizeof(double) * 5,  /* bytes */
                          0,                   /* offset */
                          1,                   /* count */
                          sizeof(double) * 6); /* stride */
    PAMI_Type_add_simple (subtype,
                          sizeof(double) * 6,  /* bytes */
                          0,                   /* offset */
                          1,                   /* count */
                          sizeof(double) * 7); /* stride */
    PAMI_Type_add_simple (subtype,
                          sizeof(double) * 7,  /* bytes */
                          0,                   /* offset */
                          1,                   /* count */
                          sizeof(double) * 8);/* stride */
    PAMI_Type_add_simple (subtype,
                          sizeof(double) * 8,  /* bytes */
                          0,                   /* offset */
                          1,                   /* count */
                          sizeof(double) * 10);/* stride */
    PAMI_Type_complete (subtype, sizeof(double));

    /* This noncontiguous type is composed of the above compound type, repeated
     * ten times with no stride.
     *
     * This results in a type with 320 doubles that is 400 doubles
     * 'wide'.
     */
    PAMI_Type_create (&compound_type);
    PAMI_Type_add_typed (compound_type,
                         subtype,                  /* subtype */
                         0,                        /* offset */
                         10,                       /* count */
                         sizeof(double) * 32);     /* stride */
    PAMI_Type_complete (compound_type, sizeof(double));


    /* This simple noncontiguous type is composed of eight contiguous doubles,
     * then skips a _single_ double, repeated 40 times.
     *
     * This results in a type with 320 doubles that is 360 doubles 'wide'.
     */
    PAMI_Type_create (&simple_type);
    PAMI_Type_add_simple (simple_type,
                          sizeof(double) * 8,  /* bytes */
                          0,                   /* offset */
                          40,                  /* count */
                          sizeof(double) * 9); /* stride */
    PAMI_Type_complete (simple_type, sizeof(double));


    /* Create a memory region for the local data buffer. */
    size_t bytes;
    result = PAMI_Memregion_create (context[0], (void *) data, BUFFERSIZE,
                                    &bytes, &exchange[task_id].mr);

    if (result != PAMI_SUCCESS)
      {
        fprintf (stderr, "Error. Unable to create memory region. result = %d\n", result);
        return 1;
      }
    else if (bytes < BUFFERSIZE)
      {
        fprintf (stderr, "Error. Unable to create memory region of a large enough size. result = %d\n", result);
        return 1;
      }

    /* Broadcast the memory region to all tasks - including self. */
    for (i = 0; i < num_tasks; i++)
      {
        pami_send_immediate_t parameters;
        parameters.dispatch        = EXCHANGE_DISPATCH_ID;
        parameters.header.iov_base = (void *) & bytes;
        parameters.header.iov_len  = sizeof(size_t);
        parameters.data.iov_base   = (void *) & exchange[task_id].mr;
        parameters.data.iov_len    = sizeof(pami_memregion_t);
        PAMI_Endpoint_create (client, i, 0, &parameters.dest);

        result = PAMI_Send_immediate (context[0], &parameters);
      }

    /* Advance until all memory regions have been received. */
    for (i = 0; i < num_tasks; i++)
      {
        while (exchange[i].active == 0)
          PAMI_Context_advance (context[0], 100);
      }


  } /* init done */


  pami_send_immediate_t notify;
  notify.dispatch        = NOTIFY_DISPATCH_ID;
  notify.header.iov_base = NULL;
  notify.header.iov_len  = 0;
  notify.data.iov_base   = NULL;
  notify.data.iov_len    = 0;

  volatile size_t active = 1;

  pami_rget_typed_t parameters;
  parameters.rma.hints          = (pami_send_hint_t) {0};
  parameters.rma.cookie         = (void *) & active;
  parameters.rma.done_fn        = decrement;
  parameters.rma.bytes          = 320 * sizeof(double);

  if (task_id == 0)
    {
      fprintf (stdout, "PAMI_Rget('typed') functional test %s\n", (ncontexts < 2) ? "" : "[crosstalk]");
      fprintf (stdout, "\n");

      parameters.rdma.local.mr      = &exchange[0].mr;
      parameters.rdma.remote.mr     = &exchange[num_tasks - 1].mr;
      PAMI_Endpoint_create (client, num_tasks - 1, ncontexts - 1, &parameters.rma.dest);

      PAMI_Endpoint_create (client, num_tasks - 1, ncontexts - 1, &notify.dest);
    }
  else
    {
      parameters.rdma.local.mr      = &exchange[num_tasks - 1].mr;
      parameters.rdma.remote.mr     = &exchange[0].mr;
      PAMI_Endpoint_create (client, 0, 0, &parameters.rma.dest);

      PAMI_Endpoint_create (client, 0, 0, &notify.dest);
    }


  /* ******************************************************************** */

  /* contiguous -> contiguous transfer test                               */

  /* ******************************************************************** */
  if (task_id == 0)
    {
      parameters.rdma.local.offset  = 0;
      parameters.rdma.remote.offset = 0;
      parameters.type.local         = PAMI_TYPE_DOUBLE;
      parameters.type.remote        = PAMI_TYPE_DOUBLE;

      active = 1;
      PAMI_Rget_typed (context[0], &parameters);

      while (active > 0)
        PAMI_Context_advance (context[0], 100);

      /* Notify the remote task that the data has been transfered. */
      PAMI_Send_immediate (context[0], &notify);
    }
  else if (task_id == num_tasks - 1)
    {
      /* Wait for notification that the data has been transfered. */
      while (ready == 0)
        PAMI_Context_advance (context[ncontexts - 1], 100);

      ready = 0;
    }

  /* ******************************************************************** */

  /* contiguous -> non-contiguous transfer test                               */

  /* ******************************************************************** */
  if (task_id == num_tasks - 1)
    {
      parameters.rdma.local.offset  = 4 * 1024;
      parameters.rdma.remote.offset = 0;
      parameters.type.local         = simple_type;
      parameters.type.remote        = PAMI_TYPE_DOUBLE;

      active = 1;
      PAMI_Rget_typed (context[ncontexts - 1], &parameters);

      while (active > 0)
        PAMI_Context_advance (context[ncontexts - 1], 100);

      /* Notify the remote task that the data has been transfered. */
      PAMI_Send_immediate (context[ncontexts - 1], &notify);
    }
  else if (task_id == 0)
    {
      /* Wait for notification that the data has been transfered. */
      while (ready == 0)
        PAMI_Context_advance (context[0], 100);

      ready = 0;
    }

  /* ******************************************************************** */

  /* non-contiguous -> non-contiguous transfer test                       */

  /* ******************************************************************** */
  if (task_id == 0)
    {
      parameters.rdma.local.offset  = 4 * 1024;
      parameters.rdma.remote.offset = 4 * 1024;
      parameters.type.local         = compound_type;
      parameters.type.remote        = simple_type;

      active = 1;
      PAMI_Rget_typed (context[0], &parameters);

      while (active > 0)
        PAMI_Context_advance (context[0], 100);

      /* Notify the remote task that the data has been transfered. */
      PAMI_Send_immediate (context[0], &notify);
    }
  else if (task_id == num_tasks - 1)
    {
      /* Wait for notification that the data has been transfered. */
      while (ready == 0)
        PAMI_Context_advance (context[ncontexts - 1], 100);

      ready = 0;
    }

  /* ******************************************************************** */

  /* non-contiguous -> contiguous transfer test                           */

  /* ******************************************************************** */
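  /* The last task gathers the 'compound_type' layout from task 0 (remote
     byte offset 4 KiB) into a contiguous run of doubles at a local byte
     offset of 8 KiB. */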
  if (task_id == num_tasks - 1)
    {
      parameters.rdma.local.offset  = 8 * 1024;
      parameters.rdma.remote.offset = 4 * 1024;
      parameters.type.local         = PAMI_TYPE_DOUBLE;
      parameters.type.remote        = compound_type;

      active = 1;
      PAMI_Rget_typed (context[ncontexts - 1], &parameters);

      while (active > 0)
        PAMI_Context_advance (context[ncontexts - 1], 100);

      /* Notify the remote task that the data has been transferred. */
      PAMI_Send_immediate (context[ncontexts - 1], &notify);
    }
  else if (task_id == 0)
    {
      /* Wait for notification that the data has been transferred. */
      while (ready == 0)
        PAMI_Context_advance (context[0], 100);

      ready = 0;
    }


  /* ******************************************************************** */

  /* VERIFY data buffers                                                  */

  /* ******************************************************************** */
  if (task_id == num_tasks - 1)
    {
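      /* Note: with the nested task_id checks below, this verification only
         executes in a single-task job, where task 0 is also the last task
         and all four transfers land in the same local buffer. The buffer is
         expected to hold 320 PI values at the start, the strided
         'compound_type' pattern beginning at byte offset 4 KiB, another
         320 contiguous PI values at byte offset 8 KiB, and E everywhere
         else. */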
      if (task_id == 0)
        {
          unsigned i = 0;

          for (; i < 320; i++)
            {
              if (data[i] != PI)
                {
                  errors++;
                  fprintf (stderr, "Error. data[%u] != %g ..... (%g)\n", i, PI, data[i]);
                }
            }

          for (; i < 512; i++)
            {
              if (data[i] != E)
                {
                  errors++;
                  fprintf (stderr, "Error. data[%u] != %g ..... (%g)\n", i, E, data[i]);
                }
            }

          unsigned j = 0;

          for (; j < 40; j++)
            {
              unsigned n = 0;

              for (; n < 8; n++)
                {
                  if (data[i] != PI)
                    {
                      errors++;
                      fprintf (stderr, "Error. data[%u] != %g ..... (%g)\n", i, PI, data[i]);
                    }

                  i++;
                }

              if (data[i] != E)
                {
                  errors++;
                  fprintf (stderr, "Error. data[%u] != %g ..... (%g)\n", i, E, data[i]);
                }

              i++;
            }

          for (; i < 1024; i++)
            {
              if (data[i] != E)
                {
                  errors++;
                  fprintf (stderr, "Error. data[%u] != %g ..... (%g)\n", i, E, data[i]);
                }
            }


          for (; i < 1024 + 320; i++)
            {
              if (data[i] != PI)
                {
                  errors++;
                  fprintf (stderr, "Error. data[%u] != %g ..... (%g)\n", i, PI, data[i]);
                }
            }

          for (; i < BUFFERSIZE; i++)
            {
              if (data[i] != E)
                {
                  errors++;
                  fprintf (stderr, "Error. data[%u] != %g ..... (%g)\n", i, E, data[i]);
                }
            }
        }
    }

  { /* cleanup */
    result = PAMI_Context_destroyv(context, ncontexts);

    if (result != PAMI_SUCCESS)
      {
        fprintf (stderr, "Error. Unable to destroy pami context. result = %d\n", result);
        return 1;
      }

    result = PAMI_Client_destroy(&client);

    if (result != PAMI_SUCCESS)
      {
        fprintf (stderr, "Error. Unable to destroy pami client. result = %d\n", result);
        return 1;
      }

    if (task_id == num_tasks - 1)
      {
        if (errors)
          fprintf (stdout, "Test completed with errors (%zu)\n", errors);
        else
          fprintf (stdout, "Test completed with success\n");
      }

  } /* cleanup done */

  return (errors != 0);
};