Example No. 1
int main(int argc, char **argv)
{
    /* Take the iteration count from the command line, or fall back to the default. */
    int iterations = (argc == 2) ? atoi(argv[1]) : ITERATIONS;

    /* Spawn one tester thread per slot, passing the thread index through the void * argument. */
    for (int i = FIRST_THREAD; i < thread_count; i++) {
        pthread_t thread;
        pthread_create(&thread, NULL, tester_thread, (void *)(long)i);
    }

    run_tests(thread_count, iterations);
    summarize_results();
    return 0;
}
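Note that the pthread_t handle above is a loop-local variable that is overwritten on every iteration, so the created threads can never be joined; presumably run_tests() handles the synchronization. A minimal sketch of the more common pattern, keeping the handles in an array so main can wait for the workers (tester_thread, thread_count, and FIRST_THREAD are taken from the listing and assumed to be defined elsewhere):

#include <pthread.h>
#include <stdlib.h>

/* Sketch: keep one handle per thread so they can all be joined. */
pthread_t *threads = malloc(sizeof(pthread_t) * thread_count);
for (int i = FIRST_THREAD; i < thread_count; i++)
    pthread_create(&threads[i], NULL, tester_thread, (void *)(long)i);
for (int i = FIRST_THREAD; i < thread_count; i++)
    pthread_join(threads[i], NULL);
free(threads);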
Example No. 2
/* Run a simulation, using the specified environment file.  */
static void
run_one_simulation (struct dirent *env_file)
{
  if (! open_log_file (env_file->d_name))
    return;

  LOG ((LOG_SCREEN | LOG_FILE), "*** Starting simulation %s ***", env_file->d_name);

  /* Parse the environment file.  Close the log file on failure,
     since open_log_file already succeeded above.  */
  if (! initialize_from_env_file (env_file))
    {
      close_log_file ();
      return;
    }

  /* Create all agents.  */
  if (! spawn_all_agents ())
    {
      close_log_file ();
      return;
    }

  for (env.current_time = 0;
       env.current_time < env.simulation_length;
       env.current_time++)
    {
      LOG (LOG_FILE, " ");
      LOG ((LOG_SCREEN | LOG_FILE), "Starting Turn %u", env.current_time);
      LOG (LOG_FILE, " ");
      log_map ();
      LOG (LOG_FILE, " ");
      run_one_turn ();
      if (env.alive_agents == NULL)
        {
          LOG ((LOG_SCREEN | LOG_FILE), "  ** All agents have died -- aborting simulation");
          break;
        }
    }

  /* Clean up.  */
  summarize_results ();
  destroy_all_agents ();
  destroy_agent_list (&env.alive_agents);
  destroy_all_predators ();
  destroy_environment ();
  LOG ((LOG_SCREEN | LOG_FILE), "*** End of simulation %s ***\n", env_file->d_name);

  close_log_file ();
}
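The LOG macro is not defined in this excerpt. One plausible shape for it, assuming a destination bitmask and printf-style formatting (LOG_SCREEN, LOG_FILE, and log_fp are assumptions made for this sketch, not names confirmed by the source):

#include <stdarg.h>
#include <stdio.h>

enum { LOG_SCREEN = 1, LOG_FILE = 2 };
static FILE *log_fp;            /* assumed to be set by open_log_file */

static void
log_message (unsigned dest, const char *fmt, ...)
{
  va_list ap;

  if (dest & LOG_SCREEN)
    {
      va_start (ap, fmt);
      vprintf (fmt, ap);
      putchar ('\n');
      va_end (ap);
    }
  if ((dest & LOG_FILE) && log_fp != NULL)
    {
      va_start (ap, fmt);
      vfprintf (log_fp, fmt, ap);
      fputc ('\n', log_fp);
      va_end (ap);
    }
}

#define LOG(dest, ...) log_message (dest, __VA_ARGS__)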
Example No. 3
int main(int argc, char *argv[])
{
  int i, curlorank, nextrank;
  int reslen, rxlen;
  char procname[MAX_CHARS_IN_PROCNAME];
  char rxprocname[MAX_CHARS_IN_PROCNAME];
  MPI_Status status;

  /* Initialize MPI first: some implementations add their own command */
  /* line arguments, which MPI_Init strips from argv before we check it */

  MPI_Init(&argc,&argv);

  if(argc != 1)
  {
    printf("Usage: mmpi\n");
    MPI_Finalize();
    exit(EXIT_FAILURE);
  }

  /* Determine max and current rank */

  MPI_Comm_size(MPI_COMM_WORLD,&maxrank);
  MPI_Comm_rank(MPI_COMM_WORLD,&myid);
  MPI_Get_processor_name(procname,&reslen);

  /* Determine host name */
  if(gethostname(hostname,MAXHOSTNAMELEN) == 0)
  {
    printf("gethostname - %s\n",hostname);
  }

  /* Collect the processor name of every rank on rank 0 */
  /* This is used to label the physical nodes in the output */
  /* It relies on an MPI message exchange from all nodes to the rank 0 node */
  for(i=0;i<maxrank;i++)
  {
    if(myid == 0)
    {
      if(i==0)
      {
       memcpy(procs[0].name,procname,reslen+1);
       procs[0].rank = 0;
      }
      else
      {
        MPI_Recv(rxprocname, MAX_CHARS_IN_PROCNAME, MPI_CHAR, i,10, MPI_COMM_WORLD, &status);

        /* find the length of the received name string */
        for(rxlen=0;rxlen<MAX_CHARS_IN_PROCNAME;rxlen++)
        {
          if(rxprocname[rxlen] == 0)
          {
            break;
          }
        }
        memcpy(procs[i].name,rxprocname,rxlen+1);  /* +1 keeps the NUL terminator */
        procs[i].rank=i;
      }
    }
    else if(myid==i)
    {
      MPI_Send(procname, MAX_CHARS_IN_PROCNAME, MPI_CHAR,0,10, MPI_COMM_WORLD);
    }
  }

  /* Sync all processes now that processor info has been exchanged by the ranks */

  MPI_Barrier(MPI_COMM_WORLD);

  /* total_path_count counts how many processor-to-processor links we */
  /* test per repetition. cur_path_count is reset on each repetition */
  /* but matches total_path_count at the end of the last one */

  total_path_count = 0;
  for (curlorank=0;curlorank<maxrank;curlorank++)
  {
    for (nextrank=curlorank;nextrank<maxrank;nextrank++)
    {
      comm_path_info[total_path_count].totalMsgs = 0;
      comm_path_info[total_path_count].totalTime = 0.0;
      comm_path_info[total_path_count].totalTime2 = 0.0;
      ++total_path_count;
    }
  }
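  /* Including the self-paths (curlorank == nextrank), this counts the */
  /* triangular number maxrank * (maxrank + 1) / 2 paths, e.g. 10 for 4 ranks */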
  printf("Total paths under test: %d\n",total_path_count);

  /* Top-level repeat loop, which takes us through the total number of repetitions */
  /* We run through all the rank pairs and then repeat, in order to reduce buffering issues */

  for (rep_count=0;rep_count<repeatNum;rep_count++)
  {
    /* rep_count indicates which of the total number of repeat intervals */
    /* we are currently measuring. */

    cur_path_count = 0;

    for (curlorank=0;curlorank<maxrank;curlorank++)
    {
      for (nextrank=curlorank;nextrank<maxrank;nextrank++)
      {
        if ((myid == curlorank) && (myid == nextrank))
        {
          /* This is a process sending a message to itself. */
          /* We consider this an invalid case and set the values to 0.0 */
          p2p_msgs(curlorank,nextrank,results);
        }
        else if (myid == curlorank)
        {
          /* By convention the low rank always initiates the send; */
          /* the traffic is round trip, so the upper rank never initiates messages */
          tx_msgs(curlorank,nextrank,results);
        }
        else if (myid == nextrank)
        {
          /* This is the case where we receive messages from the low rank */
          rx_msgs(curlorank,nextrank);
        }
        else
        {
          /* Our process is neither sending nor receiving on this path, */
          /* so we go to a barrier and wait for the other processes */
          /* to complete the exchange, then continue */
          stay_in_sync();
        }

        /* collect_measures is another MPI exchange: the data just collected */
        /* is sent back to the rank 0 process to help build the reports. */
        /* Sync all processes, then the lo_rank process sends its data */
        /* to rank 0 for reporting */
        MPI_Barrier(MPI_COMM_WORLD);
        collect_measures(curlorank,nextrank,results);
        ++cur_path_count;
      } /* End receive rank loop */
    } /* End send rank loop */
  } /* end repeat interval */

  MPI_Finalize();
  if (myid == 0)
  {
    summarize_results(total_path_count,comm_path_info);
  }
  return 0;
}
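The listing relies on file-scope state that is not shown. A minimal sketch of declarations consistent with how the code uses them (the struct layouts, array sizes, and the repeatNum and results values are assumptions; only the names come from the listing):

#include <mpi.h>
#include <unistd.h>                     /* gethostname */

#define MAX_CHARS_IN_PROCNAME MPI_MAX_PROCESSOR_NAME
#define MAXHOSTNAMELEN 256              /* assumption */
#define MAX_RANKS 256                   /* assumption */
#define MAX_PATHS 4096                  /* assumption: at least maxrank*(maxrank+1)/2 */

struct proc_info {
  char name[MAX_CHARS_IN_PROCNAME];     /* processor name reported by MPI */
  int  rank;
};

struct path_info {
  long   totalMsgs;                     /* messages exchanged on this path */
  double totalTime;                     /* sum of round-trip times */
  double totalTime2;                    /* sum of squared times, for variance */
};

static struct proc_info procs[MAX_RANKS];
static struct path_info comm_path_info[MAX_PATHS];
static char hostname[MAXHOSTNAMELEN];
static int maxrank, myid;
static int total_path_count, cur_path_count;
static int rep_count;
static int repeatNum = 10;              /* assumption */
static double results[2];               /* assumption: e.g. a latency/bandwidth pair */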