Example #1
int MPI_Finalize(void)
{
    int result, MPIT_result;
    uint64_t * exchange_count_matrix = NULL;
    uint64_t * exchange_size_matrix = NULL;
    uint64_t * exchange_avg_size_matrix = NULL;

    if (0 == comm_world_rank) {
        exchange_count_matrix = (uint64_t *) malloc(comm_world_size * comm_world_size * sizeof(uint64_t));
        exchange_size_matrix = (uint64_t *) malloc(comm_world_size * comm_world_size * sizeof(uint64_t));
        exchange_avg_size_matrix = (uint64_t *) malloc(comm_world_size * comm_world_size * sizeof(uint64_t));
    }

    stop_monitoring_result(&counts);
    stop_monitoring_result(&sizes);

    get_monitoring_result(&counts);
    get_monitoring_result(&sizes);

    PMPI_Gather(counts.vector, comm_world_size, MPI_UNSIGNED_LONG, exchange_count_matrix, comm_world_size, MPI_UNSIGNED_LONG, 0, MPI_COMM_WORLD);
    PMPI_Gather(sizes.vector,  comm_world_size, MPI_UNSIGNED_LONG, exchange_size_matrix,  comm_world_size, MPI_UNSIGNED_LONG, 0, MPI_COMM_WORLD);

    if (0 == comm_world_rank) {
        int i, j;

        // Produce the same matrix as profile2mat.pl does
        for (i = 0; i < comm_world_size; ++i) {
            for (j = i + 1; j < comm_world_size; ++j) {
                exchange_count_matrix[i * comm_world_size + j] = exchange_count_matrix[j * comm_world_size + i] =
                    (exchange_count_matrix[i * comm_world_size + j] + exchange_count_matrix[j * comm_world_size + i]) / 2;
                exchange_size_matrix[i * comm_world_size + j] = exchange_size_matrix[j * comm_world_size + i] =
                    (exchange_size_matrix[i * comm_world_size + j] + exchange_size_matrix[j * comm_world_size + i]) / 2;
                if (exchange_count_matrix[i * comm_world_size + j] != 0)
                    exchange_avg_size_matrix[i * comm_world_size + j] = exchange_avg_size_matrix[j * comm_world_size + i] =
                        exchange_size_matrix[i * comm_world_size + j] / exchange_count_matrix[i * comm_world_size + j];
            }
        }

        write_mat("monitoring_msg.mat", exchange_count_matrix, comm_world_size);
        write_mat("monitoring_size.mat", exchange_size_matrix, comm_world_size);
        write_mat("monitoring_avg.mat", exchange_avg_size_matrix, comm_world_size);
    }

    free(exchange_count_matrix);
    free(exchange_size_matrix);
    free(exchange_avg_size_matrix);
    destroy_monitoring_result(&counts);
    destroy_monitoring_result(&sizes);

    MPIT_result = MPI_T_pvar_session_free(&session);
    if (MPIT_result != MPI_SUCCESS) {
        fprintf(stderr, "WARNING : failed to free MPI_T session, monitoring results may be impacted : check your OpenMPI installation\n");
    }

    MPIT_result = MPI_T_finalize();
    if (MPIT_result != MPI_SUCCESS) {
        fprintf(stderr, "WARNING : failed to finalize MPI_T interface, monitoring results may be impacted : check your OpenMPI installation\n");
    }

    result = PMPI_Finalize();

    return result;
}
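
The write_mat() helper used above is not part of the listing. A minimal sketch of what it could look like, assuming it simply dumps the n x n matrix as whitespace-separated text (the implementation below is illustrative, not the original helper):

/* Illustrative only: one possible write_mat() that prints an n x n
 * uint64_t matrix to a text file, one row per line. */
#include <stdio.h>
#include <inttypes.h>

static void write_mat(const char *filename, const uint64_t *mat, int n)
{
    int i, j;
    FILE *fp = fopen(filename, "w");
    if (NULL == fp)
        return;
    for (i = 0; i < n; ++i) {
        for (j = 0; j < n; ++j)
            fprintf(fp, "%" PRIu64 " ", mat[i * n + j]);
        fprintf(fp, "\n");
    }
    fclose(fp);
}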
Example #2
int MPI_Finalize( void )
{

    int i,j;
    PMPI_Gather( my_send_count, proc_num, MPI_INT, send_count, proc_num, MPI_INT, 0, MPI_COMM_WORLD );
    PMPI_Gather( my_send_size, proc_num, MPI_INT, send_size, proc_num, MPI_INT, 0, MPI_COMM_WORLD );
    FILE *fp;
    
    if(my_rank == 0){
        fp = fopen("matrix.data", "w");
        for (i = 0; i < proc_num; i++) {
            for (j = 0; j < proc_num; j++) {
                if (j != i && send_count[i * proc_num + j] != 0)
                    //fprintf(fp, "%d %d %d %d\n", i, j, send_count[i * proc_num + j], send_size[i * proc_num + j]);
                    fprintf(fp, "%d %d %d %lf\n", i, j, send_count[i * proc_num + j],
                            (double)(send_size[i * proc_num + j]) / send_count[i * proc_num + j]);
            }
        }

        fclose(fp);
    }
    
    return PMPI_Finalize();
}
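
Example #2 relies on globals (my_send_count, my_send_size, send_count, send_size, proc_num, my_rank) that are initialized and updated elsewhere in the profiling layer. A minimal sketch of that send-side bookkeeping, assuming the counters are filled in by MPI_Init and MPI_Send wrappers (the code below is illustrative, not part of the original example):

#include <mpi.h>
#include <stdlib.h>

/* Per-destination counters updated on every send; gathered in MPI_Finalize. */
int proc_num, my_rank;
int *my_send_count, *my_send_size;   /* one slot per destination rank    */
int *send_count, *send_size;         /* proc_num x proc_num, rank 0 only */

int MPI_Init(int *argc, char ***argv)
{
    int ret = PMPI_Init(argc, argv);
    PMPI_Comm_size(MPI_COMM_WORLD, &proc_num);
    PMPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
    my_send_count = calloc(proc_num, sizeof(int));
    my_send_size  = calloc(proc_num, sizeof(int));
    if (my_rank == 0) {
        send_count = calloc(proc_num * proc_num, sizeof(int));
        send_size  = calloc(proc_num * proc_num, sizeof(int));
    }
    return ret;
}

/* Only sends on MPI_COMM_WORLD are attributed correctly here; other
 * communicators would need a rank translation. */
int MPI_Send(const void *buf, int count, MPI_Datatype datatype,
             int dest, int tag, MPI_Comm comm)
{
    int type_size;
    PMPI_Type_size(datatype, &type_size);
    my_send_count[dest] += 1;
    my_send_size[dest]  += count * type_size;
    return PMPI_Send(buf, count, datatype, dest, tag, comm);
}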
Example #3
/* STUB: forwards to PMPI_Gather with root 0, which matches MPI_Allgather
   semantics only when every rank ends up with the data (e.g. a
   single-rank communicator). */
int PMPI_Allgather ( void *sendbuf, int sendcount, MPI_Datatype sendtype,
                    void *recvbuf, int recvcount, MPI_Datatype recvtype, 
                   MPI_Comm comm )
{
  _MPI_COVERAGE();
  return PMPI_Gather(sendbuf,sendcount,sendtype,recvbuf,recvcount,recvtype,0,comm);
}
Example #4
int MPI_Gather(void *sendbuf, int sendcount, MPI_Datatype sendtype,
               void *recvbuf, int recvcount, MPI_Datatype recvtype,
               int root, MPI_Comm comm)
{
  return PMPI_Gather(sendbuf, sendcount, sendtype,
                     recvbuf, recvcount, recvtype,
                     root, comm);
}
Example #5
void primary_mapp(int rank, int commsize)
{
    int my_id;

    old_mapp = malloc(sizeof(*old_mapp) * commsize);
    new_mapp = malloc(sizeof(*new_mapp) * commsize);
    if (new_mapp == NULL || old_mapp == NULL) {
        fprintf(stderr, "failed to allocate old_mapp or new_mapp\n");
        PMPI_Abort(MPI_COMM_WORLD, -1);
    }

    my_id = getnodeid();
    if (rank == 0) {
        PMPI_Gather(&my_id, 1, MPI_INT, old_mapp, 1,
                    MPI_INT, 0, MPI_COMM_WORLD);
    } else {
        PMPI_Gather(&my_id, 1, MPI_INT, NULL, 0, MPI_INT, 0, MPI_COMM_WORLD);
    }
}
Example #6
File: mpiPi.c  Project: saxena/mpip
static int
mpiPi_mergept2ptHashStats ()
{
  int ac;
  pt2pt_stats_t **av;
  int totalCount = 0;

  if (mpiPi.do_pt2pt_detail_report)
    {
      /* gather local task data */
      h_gather_data (mpiPi.task_pt2pt_stats, &ac, (void ***) &av);

      /* Make sure we have data to collect, otherwise skip */
      PMPI_Allreduce (&ac, &totalCount, 1, MPI_INT, MPI_SUM, mpiPi.comm);

      mpiPi_msg_debug("(%d) Merging pt2pt stats: totalCount: %d\n",
		      mpiPi.rank, totalCount);

      if (totalCount < 1)
	{
	  if (mpiPi.rank == mpiPi.collectorRank)
	    {
	      mpiPi_msg_warn
		("Collector found no records to merge. Omitting report.\n");
	    }
	  return 1;
	}

      /* Gather the ac for all ranks at the root */
      if (mpiPi.rank == mpiPi.collectorRank)
	{
	  mpiPi.accumulatedPt2ptCounts = (int*)calloc(mpiPi.size, sizeof(int));
	  assert(mpiPi.accumulatedPt2ptCounts);
	}

      PMPI_Gather(&ac, 1, MPI_INT, mpiPi.accumulatedPt2ptCounts,
		  1, MPI_INT, mpiPi.collectorRank, mpiPi.comm);

      /* gather global data at collector */
      if (mpiPi.rank == mpiPi.collectorRank)
	{
	  mpiPi_recv_pt2pt_stats(ac,av);
	}
      else
	{
	  /* Send all pt2pt data to collector */
	  mpiPi_send_pt2pt_stats(ac,av);
	}
    }

  return 1;

}
Example #7
int mod_clustering_output(ipm_mod_t* mod, int flags)
{
  int i;
  procstats_t *allstats = NULL;

  get_procstats(ipm_htable, &mystats);

  if( task.taskid==0 ) {
    allstats = malloc( sizeof(procstats_t)*task.ntasks );
  }

  PMPI_Gather( &mystats, sizeof(procstats_t), MPI_BYTE,
	       allstats, sizeof(procstats_t), MPI_BYTE,
	       0, MPI_COMM_WORLD );


  if( task.taskid==0 ) {
    
    cluster_by_structural(allstats);

    print_procstat(1, 0);
    for( i=0; i<task.ntasks; i++ ) 
      {
	print_procstat(0, &(allstats[i]));
      }

    qsort( allstats, task.ntasks, sizeof(procstats_t), 
	   compare_procstat_by_rank);  
  
  }


  PMPI_Scatter( allstats, sizeof(procstats_t), MPI_BYTE,
		&mystats, sizeof(procstats_t), MPI_BYTE,
		0, MPI_COMM_WORLD);
  
  if (task.taskid == 0)
    free(allstats);

  return 0;
}
Example #8
File: mpiPi.c  Project: saxena/mpip
/*
 * mpiPi_collect_basics() - all tasks send their basic info to the
 * collectorRank.
 */
static void
mpiPi_collect_basics (int report_style)
{
  mpiPi_msg_debug ("Collect Basics\n");

  if (mpiPi.rank == mpiPi.collectorRank)
    {
      /* In the case where multiple reports are generated per run,
         only allocate memory for global_task_info once */
      if (mpiPi.global_task_app_time == NULL)
	{
	  mpiPi.global_task_app_time =
	    (double *) calloc (mpiPi.size, sizeof (double));

	  if (mpiPi.global_task_app_time == NULL)
	    mpiPi_abort
	      ("Failed to allocate memory for global_task_app_time");

	  mpiPi_msg_debug
	    ("MEMORY : Allocated for global_task_app_time :          %13ld\n",
	     mpiPi.size * sizeof (double));
	}

      bzero (mpiPi.global_task_app_time, mpiPi.size * sizeof (double));

      if (mpiPi.global_task_mpi_time == NULL)
	{
	  mpiPi.global_task_mpi_time =
	    (double *) calloc (mpiPi.size, sizeof (double));

	  if (mpiPi.global_task_mpi_time == NULL)
	    mpiPi_abort
	      ("Failed to allocate memory for global_task_mpi_time");

	  mpiPi_msg_debug
	    ("MEMORY : Allocated for global_task_mpi_time :          %13ld\n",
	     mpiPi.size * sizeof (double));
	}

      bzero (mpiPi.global_task_mpi_time, mpiPi.size * sizeof (double));

      //  Only allocate hostname storage if we are doing a verbose report
      if (mpiPi.global_task_hostnames == NULL
	  && (report_style == mpiPi_style_verbose
	      || report_style == mpiPi_style_both))
	{
	  mpiPi.global_task_hostnames =
	    (mpiPi_hostname_t *) calloc (mpiPi.size,
					 sizeof (char) *
					 MPIPI_HOSTNAME_LEN_MAX);

	  if (mpiPi.global_task_hostnames == NULL)
	    mpiPi_abort
	      ("Failed to allocate memory for global_task_hostnames");

	  mpiPi_msg_debug
	    ("MEMORY : Allocated for global_task_hostnames :          %13ld\n",
	     mpiPi.size * sizeof (char) * MPIPI_HOSTNAME_LEN_MAX);
	}

      if (mpiPi.global_task_hostnames != NULL)
	bzero (mpiPi.global_task_hostnames,
	       mpiPi.size * sizeof (char) * MPIPI_HOSTNAME_LEN_MAX);
    }

  PMPI_Gather (&mpiPi.cumulativeTime, 1, MPI_DOUBLE,
	       mpiPi.global_task_app_time, 1, MPI_DOUBLE,
	       mpiPi.collectorRank, mpiPi.comm);

  if (report_style == mpiPi_style_verbose || report_style == mpiPi_style_both)
    {
      PMPI_Gather (mpiPi.hostname, MPIPI_HOSTNAME_LEN_MAX, MPI_CHAR,
		   mpiPi.global_task_hostnames, MPIPI_HOSTNAME_LEN_MAX,
		   MPI_CHAR, mpiPi.collectorRank, mpiPi.comm);
    }

  return;
}