Example #1
int main(int argc, char *argv[])
{
    g_tw_ts_end = 1e9*60*60*24*365; /* one year, in nsecs */
    tw_opt_add(app_opt);
    tw_init(&argc, &argv);

    if (!conf_file_name[0]){
        fprintf(stderr, "Expected \"codes-config\" option, please see --help.\n");
        MPI_Finalize();
        return 1;
    }
    if (configuration_load(conf_file_name, MPI_COMM_WORLD, &config)){
        fprintf(stderr, "Error loading config file %s.\n", conf_file_name);
        MPI_Finalize();
        return 1;
    }

    resource_lp_init();
    lp_type_register("server", &s_lp);

    codes_mapping_setup();

    resource_lp_configure();

    tw_run();
    tw_end();
}
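
Example #1 hands an application option table to tw_opt_add() before calling tw_init(). As a point of reference only, here is a minimal sketch of such a table; it assumes ROSS's tw_optdef/TWOPT_* macros and reuses the conf_file_name buffer and "codes-config" option that Example #1 checks, while the group label and buffer size are illustrative.

static char conf_file_name[256] = "";

static const tw_optdef app_opt[] = {
    TWOPT_GROUP("codes-mapping test case"),
    TWOPT_CHAR("codes-config", conf_file_name, "name of codes configuration file"),
    TWOPT_END()
};
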
Example #2
int main(int argc, char *argv[])
{
    int i;
    
    g_tw_events_per_pe = nlp_per_pe * nlp_per_pe * 1;
    
    tw_opt_add(olsr_opts);
    
    tw_init(&argc, &argv);
    
    tw_define_lps(nlp_per_pe, sizeof(olsr_msg_data), 0);
    
    for (i = 0; i < g_tw_nlp; i++) {
        tw_lp_settype(i, &olsr_lps[0]);
    }
    
    tw_run();
    
    if (tw_ismaster()) {
        printf("Complete.\n");
    }
    
    tw_end();
    
    return 0;
}
Example #3
int
main(int argc, char **argv, char **env)
{
  int i;
  lookahead = 1.0;
  tw_opt_add(app_opt);
  tw_init(&argc, &argv);

  g_tw_memory_nqueues = 16;

  offset_lpid = g_tw_mynode * nlp_per_pe;
  ttl_lps = tw_nnodes() * g_tw_npe * nlp_per_pe;

  g_tw_events_per_pe = (mult * nlp_per_pe * g_wifi_start_events)+ optimistic_memory;
  g_tw_lookahead = lookahead;

  tw_define_lps(nlp_per_pe, sizeof(wifi_message));

  tw_lp_settype(0, &mylps[0]);

  for(i = 1; i < g_tw_nlp; i++)
    tw_lp_settype(i, &mylps[0]);

  tw_run();
  tw_end();

  return 0;
}
Example #4
int
main(int argc, char **argv, char **env)
{
    int		 i;

    tw_opt_add(app_opt);
    tw_init(&argc, &argv);

    offset_lpid = g_tw_mynode * nlp_per_pe;
    ttl_lps = tw_nnodes() * g_tw_npe * nlp_per_pe;
    g_tw_memory_nqueues = 1;
    g_tw_events_per_pe = (mult * nlp_per_pe * g_mem_start_events) +
                         optimistic_memory;

    tw_define_lps(nlp_per_pe, sizeof(mem_message));

    for(i = 0; i < nlp_per_pe; i++)
        tw_lp_settype(i, &mylps[0]);

    //((g_tw_nlp/g_tw_nkp) * g_mem_start_events),
    // init the memory interface
    my_fd = tw_memory_init(g_tw_events_per_pe * nbufs, sizeof(mem_packet), 0.5);

    tw_run();

    mem_stats_print();

    tw_end();

    return 0;
}
Example #5
int
main(int argc, char **argv, char **env)
{
	int		 i;

	tw_opt_add(app_opt);
	tw_init(&argc, &argv);

	offset_lpid = g_tw_mynode * nlp_per_pe;
	ttl_lps = tw_nnodes() * g_tw_npe * nlp_per_pe;
	g_tw_memory_nqueues = 1;
	g_tw_events_per_pe = (mult * nlp_per_pe * g_tmr_start_events) + 
				optimistic_memory;

	tw_define_lps(nlp_per_pe, sizeof(tmr_message), 0);

	// use g_tw_nlp now
	for(i = 0; i < g_tw_nlp; i++)
		tw_lp_settype(i, &mylps[0]);

	tw_kp_memory_init(g_tw_kp, 1000, 100, 1);

	if(verbose)
		f = stdout;
	else
		f = fopen("output", "w");
		//f = fopen("/dev/null", "w");

	tw_run();
	tw_end();

	return 0;
}
Example #6
int main(int argc, char **argv, char **env)
{
	int		 i;

	lookahead = 1.0;
	tw_opt_add(app_opt);
	tw_init(&argc, &argv);

	if( lookahead > 1.0 )
		tw_error(TW_LOC, "Lookahead > 1.0 .. needs to be less\n");

	g_tw_events_per_pe = (nlp_per_pe * g_start_events) + optimistic_memory;
	g_tw_lookahead = lookahead;

	tw_define_lps(nlp_per_pe, sizeof(ping_pong_message), 0);

	for(i = 0; i < g_tw_nlp; i++)
		tw_lp_settype(i, &mylps[0]);

	if( g_tw_mynode == 0 )
	{
		printf("=============================================\n");
		printf("Ping Pong Configuration..............\n");
		printf("   Lookahead..............%lf\n", lookahead);
		printf("   Start-events...........%u\n", g_start_events);
		printf("   Memory.................%u\n", optimistic_memory);
		printf("========================================\n\n");
	}

	tw_run();
	tw_end();

	return 0;
}
Example #7
int main(
    int argc,
    char **argv)
{
    int nprocs;
    int rank;
    int num_nets, *net_ids;
    //printf("\n Config count %d ",(int) config.lpgroups_count);
    g_tw_ts_end = s_to_ns(60*60*24*365); /* one year, in nsecs */
    lp_io_handle handle;

    tw_opt_add(app_opt);
    tw_init(&argc, &argv);

    if(argc < 2)
    {
	    printf("\n Usage: mpirun <args> --sync=2/3 mapping_file_name.conf (optional --nkp) ");
	    MPI_Finalize();
	    return 0;
    }
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
  
    configuration_load(argv[2], MPI_COMM_WORLD, &config);
    svr_add_lp_type();
    
    codes_mapping_setup();

    net_ids = model_net_configure(&num_nets);
    assert(num_nets==1);
    net_id = *net_ids;
    free(net_ids);
    
    num_servers = codes_mapping_get_lp_count("MODELNET_GRP", 0, "server",
            NULL, 1);
    if(net_id == DRAGONFLY)
    {
	  num_routers = codes_mapping_get_lp_count("MODELNET_GRP", 0,
                  "dragonfly_router", NULL, 1); 
	  offset = 1;
    }

    if(lp_io_prepare("modelnet-test", LP_IO_UNIQ_SUFFIX, &handle, MPI_COMM_WORLD) < 0)
    {
        return(-1);
    }

    tw_run();
    model_net_report_stats(net_id);

    if(lp_io_flush(handle, MPI_COMM_WORLD) < 0)
    {
        return(-1);
    }

    tw_end();
    return 0;
}
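
Several of these examples set g_tw_ts_end through an s_to_ns() helper with the comment "one year, in nsecs". A minimal sketch of that conversion, assuming the simulated clock is kept in nanoseconds:

static tw_stime s_to_ns(tw_stime s)
{
    /* convert seconds to nanoseconds */
    return s * (1000.0 * 1000.0 * 1000.0);
}
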
Example #8
int
main(int argc, char **argv)
{
  int i;
  tw_lp          *lp;
  tw_kp          *kp;
  tw_pe          *pe;

  tw_opt_add(app_opt);
  tw_init(&argc, &argv);

  if( lookahead > 1.0 )
    tw_error(TW_LOC, "Lookahead > 1.0 .. needs to be less\n");
  
  //reset mean based on lookahead
  mean = mean - lookahead;

  g_tw_memory_nqueues = 16; // give at least 16 memory queue events
  
  offset_lpid = g_tw_mynode * nlp_per_pe;
  ttl_lps = tw_nnodes() * g_tw_npe * nlp_per_pe;
  g_tw_events_per_pe = (mult * nlp_per_pe * g_phold_start_events) + 
    optimistic_memory;
  //g_tw_rng_default = TW_FALSE;
  g_tw_lookahead = lookahead;
  
  tw_define_lps(nlp_per_pe, sizeof(tcp_phold_message));
  
    for(i = 0; i < g_tw_nlp; i++)
        tw_lp_settype(i, &mylps[0]);
  
  if( g_tw_mynode == 0 )
    {
      printf("Running simulation with following configuration: \n" );
      printf("    Processors Used = %d\n", tw_nnodes());
      printf("    KPs Used = %lu\n", g_tw_nkp);
      printf("    LPs Used = %u\n", nlp_per_model);
      printf("    End Time = %f \n", g_tw_ts_end);
      printf("    Buffers Allocated Per PE = %d\n", g_tw_events_per_pe);
      printf("    Gvt Interval = %d\n", g_tw_gvt_interval);
      printf("    Message Block Size (i.e., Batch) = %d\n", g_tw_mblock);
      printf("\n\n");
    }

  TWAppStats.sent_packets = 0; 
  TWAppStats.received_packets = 0; 
  TWAppStats.dropped_packets = 0;
  TWAppStats.timedout_packets = 0;
  TWAppStats.throughput = 0;
 
  tw_run();
  tw_end();
  tcp_finalize( &TWAppStats );
  return 0;
}
Example #9
int main(int argc, char **argv) {
  int nprocs;
  int rank;
  int num_nets, *net_ids;
  int i;

  //for later random pairing:
  srand(time(NULL));
  
  g_tw_ts_end = s_to_ns(60*60*24*365);
  tw_opt_add(app_opt);
  tw_init(&argc, &argv);

  if (!conf_file_name[0]) {
    fprintf(stderr, "Expected \"codes-config\" option, please see --help.\n");
    MPI_Finalize();
    return 1;
  }

  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
  
  if (configuration_load(conf_file_name, MPI_COMM_WORLD, &config)) {
    fprintf(stderr, "Error loading config file %s.\n", conf_file_name);
    MPI_Finalize();
    return 1;
  }

  model_net_register();
  node_add_lp_type();
  codes_mapping_setup();
  net_ids = model_net_configure(&num_nets);
  assert(num_nets==1);
  net_id = *net_ids;
  free(net_ids);
  if(net_id != TORUS) {
    printf("\n This is written to simulate torus networks.");
    MPI_Finalize();
    return 0;
  }
  num_nodes = codes_mapping_get_lp_count(group_name, 0, "node", NULL, 1);
  configuration_get_value_int(&config, param_group_nm, num_reqs_key, NULL, &num_reqs);
  configuration_get_value_int(&config, param_group_nm, payload_sz_key, NULL, &payload_sz);
  configuration_get_value_int(&config, param_group_nm, num_messages_key, NULL, &num_messages);
  
  tw_run();
  
  model_net_report_stats(net_id);
  
  tw_end();
  free_node_mappings();

  return 0;
}
Example #10
int main( int argc, char** argv )
{
  int i;

  tw_opt_add(app_opt);
  tw_init(&argc, &argv);
  printf("First version BGP model! \n");

  ////////////
  N_controller_per_DDN = NumControllerPerDDN;
  N_FS_per_DDN = N_controller_per_DDN * NumFSPerController;
  N_ION_per_DDN = N_FS_per_DDN * N_ION_per_FS;
  N_CN_per_DDN = N_ION_per_DDN * N_CN_per_ION;

  int N_lp_per_DDN = N_controller_per_DDN + N_FS_per_DDN + N_ION_per_DDN
  + N_CN_per_DDN + 1;

  int N_lp_t = N_lp_per_DDN * NumDDN;
  nlp_per_pe = N_lp_t/tw_nnodes()/g_tw_npe;

  N_DDN_per_PE = NumDDN/tw_nnodes()/g_tw_npe;

  g_tw_events_per_pe = nlp_per_pe * 32 + opt_mem;
  tw_define_lps(nlp_per_pe, sizeof(MsgData), 0);

  // mapping initialization

  int LPaccumulate = 0;
  for( i=0; i<N_CN_per_DDN*N_DDN_per_PE; i++ )
    tw_lp_settype(i, &mylps[0]);
  LPaccumulate += N_CN_per_DDN*N_DDN_per_PE;

  for( i=0; i<N_ION_per_DDN*N_DDN_per_PE; i++ )
    tw_lp_settype(i + LPaccumulate, &mylps[1]);
  LPaccumulate += N_ION_per_DDN*N_DDN_per_PE;

  for( i=0; i<N_FS_per_DDN*N_DDN_per_PE; i++ )
    tw_lp_settype(i + LPaccumulate, &mylps[2]);
  LPaccumulate += N_FS_per_DDN*N_DDN_per_PE;

  for( i=0; i<N_controller_per_DDN*N_DDN_per_PE; i++ )
    tw_lp_settype(i + LPaccumulate, &mylps[3]);
  LPaccumulate += N_controller_per_DDN*N_DDN_per_PE;

  for( i=0; i<N_DDN_per_PE; i++ )
    tw_lp_settype(i + LPaccumulate, &mylps[4]);

  tw_run();

  tw_end();
  return 0;

}
Example #11
int gates_main(int argc, char* argv[]){

    int i, j;

    tw_opt_add(gates_opts);
    tw_init(&argc, &argv);

    // if (WAVE_COUNT != 0 && g_tw_synchronization_protocol > 2) {
    //     printf("ERROR: Waveform viewing is not supported for non-conservative protocols.\n");
    //     return 1;
    // }

    g_tw_mapping = CUSTOM;
    g_tw_custom_initial_mapping = &gates_custom_mapping_setup;
    g_tw_custom_lp_global_to_local_map = &gates_custom_mapping_to_local;
    routing_table_mpi = routing_table_mapper(tw_nnodes());

    g_tw_lp_offset = (*routing_table_mpi)[g_tw_mynode];
    g_tw_nlp = (*routing_table_mpi)[g_tw_mynode+1] - g_tw_lp_offset;
    g_tw_nkp = TOTAL_PARTS / tw_nnodes();
    if (g_tw_mynode < TOTAL_PARTS - (g_tw_nkp * tw_nnodes())) {
        g_tw_nkp++;
    }

    g_tw_events_per_pe = 100*g_tw_nlp;
    g_tw_lookahead = 0.001;

    tw_define_lps(g_tw_nlp, sizeof(message));

    g_tw_lp_types = gates_lps;
    g_io_lp_types = iolps;
    tw_lp_setup_types();

    g_io_events_buffered_per_rank = 0;
    char cpname[100];
    char *dpath = dirname(argv[0]);
    sprintf(cpname, "%s/checkpoint/submodule-checkpoint", dpath);
    io_init();
    io_load_checkpoint(cpname, PRE_INIT);

#if DEBUG_TRACE
    char debugfile[100];
    sprintf(debugfile, "%s/debug/node_%d_output_file.txt", dpath, g_tw_mynode);
    node_out_file = fopen(debugfile,"w");
#endif

    tw_run();

    tw_end();

    return 0;
}
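
Example #11 switches ROSS to a CUSTOM mapping, which needs both an initial-mapping hook and a global-to-local LP lookup. The gates model's real lookup goes through routing_table_mpi; the sketch below only illustrates the expected signature with a simple contiguous layout, so treat the body as an assumption rather than the model's implementation.

/* Map a global LP id to the local tw_lp pointer, assuming this rank owns the
 * contiguous block [g_tw_lp_offset, g_tw_lp_offset + g_tw_nlp). */
tw_lp * gates_custom_mapping_to_local(tw_lpid gid)
{
    return g_tw_lp[gid - g_tw_lp_offset];
}
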
Example #12
int main(
    int argc,
    char **argv)
{
    int nprocs;
    int rank;
    int lps_per_proc;
    int i;
    int ret;
    lp_io_handle handle;

    g_tw_ts_end = 60*60*24*365;

    tw_opt_add(app_opt);
    tw_init(&argc, &argv);
 
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
  
    if((NUM_SERVERS) % nprocs)
    {
        fprintf(stderr, "Error: number of server LPs (%d total) is not evenly divisible by the number of MPI processes (%d)\n", NUM_SERVERS, nprocs);
        exit(-1);
    }

    lps_per_proc = (NUM_SERVERS) / nprocs;

    tw_define_lps(lps_per_proc, sizeof(struct svr_msg), 0);

    for(i=0; i<lps_per_proc; i++)
    {
        tw_lp_settype(i, &svr_lp);
    }

    g_tw_lookahead = 100;

    ret = lp_io_prepare("lp-io-test-results", LP_IO_UNIQ_SUFFIX, &handle, MPI_COMM_WORLD);
    if(ret < 0)
    {
       return(-1); 
    }

    tw_run();

    ret = lp_io_flush(handle, MPI_COMM_WORLD);
    assert(ret == 0);

    tw_end();

    return 0;
}
Example #13
int
main(int argc, char **argv, char **env)
{
	int		 i;

        // get rid of error if compiled w/ MEMORY queues
        g_tw_memory_nqueues=1;
	// set a min lookahead of 1.0
	lookahead = 1.0;
	tw_opt_add(app_opt);
	tw_init(&argc, &argv);

	if( lookahead > 1.0 )
	  tw_error(TW_LOC, "Lookahead > 1.0 .. needs to be less\n");

	//reset mean based on lookahead
        mean = mean - lookahead;

        g_tw_memory_nqueues = 16; // give at least 16 memory queue events

	offset_lpid = g_tw_mynode * nlp_per_pe;
	ttl_lps = tw_nnodes() * g_tw_npe * nlp_per_pe;
	g_tw_events_per_pe = (mult * nlp_per_pe * g_phold_start_events) +
				optimistic_memory;
	//g_tw_rng_default = TW_FALSE;
	g_tw_lookahead = lookahead;

	tw_define_lps(nlp_per_pe, sizeof(phold_message));

	for(i = 0; i < g_tw_nlp; i++)
		tw_lp_settype(i, &mylps[0]);

        if( g_tw_mynode == 0 )
	  {
	    printf("========================================\n");
	    printf("PHOLD Model Configuration..............\n");
	    printf("   Lookahead..............%lf\n", lookahead);
	    printf("   Start-events...........%u\n", g_phold_start_events);
	    printf("   stagger................%u\n", stagger);
	    printf("   Mean...................%lf\n", mean);
	    printf("   Mult...................%lf\n", mult);
	    printf("   Memory.................%u\n", optimistic_memory);
	    printf("   Remote.................%lf\n", percent_remote);
	    printf("========================================\n\n");
	  }

	tw_run();
	tw_end();

	return 0;
}
Example #14
int main(
    int argc,
    char **argv)
{
    int nprocs;
    int rank;
    lp_io_handle handle;
    int ret;

    g_tw_ts_end = s_to_ns(60*60*24*365); /* one year, in nsecs */

    tw_opt_add(app_opt);
    tw_init(&argc, &argv);

    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);

    /* read in configuration file */
    ret = configuration_load(conf_file_name, MPI_COMM_WORLD, &config);
    if (ret)
    {
        fprintf(stderr, "Error opening config file: %s\n", conf_file_name);
        return(-1);
    }

    lp_type_register("server", &svr_lp);
    lsm_register();

    codes_mapping_setup();

    lsm_configure();

    ret = lp_io_prepare("lsm-test", LP_IO_UNIQ_SUFFIX, &handle, MPI_COMM_WORLD);
    if(ret < 0)
    {
       return(-1);
    }

    INIT_CODES_CB_INFO(&cb_info, svr_msg, h, tag, ret);

    tw_run();

    ret = lp_io_flush(handle, MPI_COMM_WORLD);
    assert(ret == 0);

    tw_end();

    return 0;
}
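
Examples #12 and #14 bracket tw_run() with lp_io_prepare() and lp_io_flush(); whatever the LPs stage during the run is what gets written out at flush time. A rough sketch of how a server LP's finalize handler might stage a line of output, assuming codes' lp_io_write(gid, name, size, buffer) call and a hypothetical svr_state counter:

static void svr_finalize(svr_state *ns, tw_lp *lp)
{
    char buf[64];
    int n = sprintf(buf, "lp %llu sends %d\n",
                    (unsigned long long)lp->gid, ns->send_count);
    /* staged under the "svr-stats" identifier, written out by lp_io_flush() */
    lp_io_write(lp->gid, "svr-stats", n, buf);
}
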
Example #15
int main(int argc, char *argv[])
{
    int num_nets, *net_ids;
    g_tw_ts_end = s_to_ns(60*60*24*365); /* one year, in nsecs */

    tw_opt_add(app_opt);
    tw_init(&argc, &argv);

    if (!conf_file_name[0]) {
        fprintf(stderr, "Expected \"codes-config\" option, please see --help.\n");
        MPI_Finalize();
        return 1;
    }

    /* loading the config file into the codes-mapping utility, giving us the
     * parsed config object in return. 
     * "config" is a global var defined by codes-mapping */
    if (configuration_load(conf_file_name, MPI_COMM_WORLD, &config)){
        fprintf(stderr, "Error loading config file %s.\n", conf_file_name);
        MPI_Finalize();
        return 1;
    }

    testsvr_init();
    model_net_register();

    codes_mapping_setup();

    /* Setup the model-net parameters specified in the global config object;
     * returned is the identifier for the network type */
    net_ids = model_net_configure(&num_nets);
    assert(num_nets==1);
    net_id = *net_ids;
    free(net_ids);

    /* currently restrict to simplenet, as other networks are trickier to
     * setup. TODO: handle other networks properly. Note that net_id is only
     * valid after model_net_configure(), so the check happens here. */
    if(net_id != SIMPLENET) {
        printf("\n The test works with simple-net configuration only! ");
        MPI_Finalize();
        return 1;
    }

    tw_run();
    tw_end();

    return 0;
}
Example #16
int main(int argc, char * argv[]) {
  int NumPE = 0;
  int i = 0;
  tw_lp *lp;
  tw_kp *kp;
  g_tw_ts_end = 300;
  g_tw_gvt_interval = 16;

  printf("Please enter the number of processors: ");
  scanf("%d",&NumPE);

  if(NumPE < 1 || NumPE > 4){
    printf("Invalid number of processors %d\n", NumPE);
    exit(0);
  }
  /* tw_lptype NumPE NumKP NumLP Message_Size*/
  
  tw_init(airport_lps, NumPE, 3, 3, sizeof(Msg_Data));
  
  for(i = 0; i < 3; i++){
    lp = tw_getlp(i);
    kp = tw_getkp(i);
    tw_lp_settype(lp, AIR_LP);
    tw_lp_onkp(lp, kp);
    
    if(i >= NumPE){
      tw_lp_onpe(lp, tw_getpe(NumPE - 1));
      tw_kp_onpe(kp, tw_getpe(NumPE - 1));
    }
    else {
      tw_lp_onpe(lp, tw_getpe(i));
      tw_kp_onpe(kp, tw_getpe(i));
    }
  }
  tw_run();

  printf("Number of Landings: %d\n", NumLanded);

  return 0;
}
Example #17
/*
 * main	- start function for the model, setup global state space of model and
 *	  init and run the simulation executive.  Also must map LPs to PEs
 */
int
main(int argc, char **argv, char **env)
{
	int		 i;

	tw_opt_add(app_opt);
	tw_init(&argc, &argv);

	g_tw_events_per_pe = (g_tw_nlp / g_tw_npe) * 4;
	g_tw_memory_nqueues = 2;

	tw_define_lps(g_tw_nlp, sizeof(epi_message), 0);

	epi_disease_init();

	// create KP memory buffer queues
	for(i = 0; i < g_tw_nkp; i++)
	{
		g_epi_fd = tw_kp_memory_init(tw_getkp(i), 
					//0, //g_epi_nagents / g_tw_nkp,
					g_epi_nagents / g_tw_nkp,
				  	sizeof(epi_agent), 1);
		g_epi_pathogen_fd = tw_kp_memory_init(tw_getkp(i), 
					(g_epi_nagents / g_tw_nkp) * g_epi_ndiseases,
				  	sizeof(epi_pathogen), 1);
	}

	// Round-robin mapping of LPs to KPs and KPs to PEs
	for(i = 0; i < g_tw_nlp; i++)
		tw_lp_settype(i, &mylps[0]);

	// read input files, create global data structures, etc
	epi_agent_init();

	tw_run();

	epi_model_final();

	return 0;
}
Example #18
int main(
    int argc,
    char **argv)
{
    int num_nets;
    int *net_ids;
    g_tw_ts_end = s_to_ns(60*60*24*365); /* one year, in nsecs */

    tw_opt_add(app_opt);
    tw_init(&argc, &argv);

    if(argc < 2)
    {
	    printf("\n Usage: mpirun <args> --sync=[1,3] -- mapping_file_name.conf (optional --nkp) ");
	    MPI_Finalize();
	    return 0;
    }

    configuration_load(argv[2], MPI_COMM_WORLD, &config);

    model_net_register();
    svr_add_lp_type();
    
    codes_mapping_setup();
    
    net_ids = model_net_configure(&num_nets);
    assert(num_nets==1);
    net_id = *net_ids;
    free(net_ids);

    assert(net_id == SIMPLENET);
    assert(NUM_SERVERS == codes_mapping_get_lp_count("MODELNET_GRP", 0,
                "server", NULL, 1));

    tw_run();

    tw_end();
    return prog_rtn;
}
Example #19
int main(
    int argc,
    char **argv)
{
    int nprocs;
    int rank;
    int num_nets;
    int *net_ids;
    char* anno = NULL;

    lp_io_handle handle;

    tw_opt_add(app_opt);
    tw_init(&argc, &argv);
    offset = 1;

    if(argc < 2)
    {
            printf("\n Usage: mpirun <args> --sync=2/3 mapping_file_name.conf (optional --nkp) ");
            MPI_Finalize();
            return 0;
    }

    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);

    configuration_load(argv[2], MPI_COMM_WORLD, &config);

    model_net_register();
    svr_add_lp_type();

    codes_mapping_setup();

    net_ids = model_net_configure(&num_nets);
    assert(num_nets==1);
    net_id = *net_ids;
    free(net_ids);

    if(net_id != DRAGONFLY)
    {
	printf("\n The test works with dragonfly model configuration only! ");
        MPI_Finalize();
        return 0;
    }
    num_servers_per_rep = codes_mapping_get_lp_count("MODELNET_GRP", 1, "server",
            NULL, 1);
    configuration_get_value_int(&config, "PARAMS", "num_routers", anno, &num_routers_per_grp);
    
    num_groups = (num_routers_per_grp * (num_routers_per_grp/2) + 1);
    num_nodes = num_groups * num_routers_per_grp * (num_routers_per_grp / 2);
    num_nodes_per_grp = num_routers_per_grp * (num_routers_per_grp / 2);

    if(lp_io_prepare("modelnet-test", LP_IO_UNIQ_SUFFIX, &handle, MPI_COMM_WORLD) < 0)
    {
        return(-1);
    }

    tw_run();
    model_net_report_stats(net_id);

    if(lp_io_flush(handle, MPI_COMM_WORLD) < 0)
    {
        return(-1);
    }

    tw_end();
    return 0;
}
Example #20
int srw_main(int argc, char *argv[])
{
  int i;
  int num_lps_per_pe = 1;

  tw_opt_add(srw_opts);

  /* This configures g_tw_npe */
  tw_init(&argc, &argv);

  /* Must call this to properly set g_tw_nlp */
  tw_define_lps(num_lps_per_pe, sizeof(srw_msg_data), 0);

#ifdef WITH_NETDMF
  /* IF we ARE using NETDMF... */

  if (!strcmp("", netdmf_config)) {
    /* AND we DO NOT have a config file */

    /* There is no NetDMF configuration file.  Create a scenario with fake
     * data, i.e. no changes are required: fake data is created by default */
    for (i = 0; i < g_tw_nlp; i++) {
      tw_lp_settype(i, &srw_lps[0]);
    }
    printf("No NetDMF configuration specified.\n");
  }
  else {
    /* OR we DO have a config file */

    /* Must repeatedly call this to copy the function pointers appropriately */
    for (i = 0; i < g_tw_nlp; i++) {
      tw_lp_settype(i, &netdmf_srw_lps[0]);
    }
    /* Read in the netdmf_config file.  This must be done after
     * we set up the LPs (via tw_lp_settype) so we have data
     * to configure. */
    for (i = 0; i < g_tw_npe; i++) {
      tw_pe_settype(g_tw_pe[i], &srw_pes[0]);
    }
  }
#else /* WITH_NETDMF */
  /* WE are NOT using NETDMF */

  /* Must repeatedly call this to copy the function pointers appropriately */
  for (i = 0; i < g_tw_nlp; i++) {
    tw_lp_settype(i, &srw_lps[0]);
  }
#endif /* WITH_NETDMF */

  tw_run();

  if (tw_ismaster()) {
    printf("Total radios in simulation:    %d\n", total_radios);
    printf("Total movements:               %d\n", total_movements);
    printf("Total communcation failures:   %d\n", total_fail);
    printf("Total communcation attempts:   %d\n", total_comm);
    printf("Total communication successes: %d\n", total_comm - total_fail);
    printf("Node %d moved the most:        %d\n", top_node, top_move);
  }

  tw_end();

  return 0;
}
Example #21
int main(int argc, char * argv[])
{
	g_tw_ts_end = 30;
	g_tw_gvt_interval = 16;
	int i;

	// get rid of error if compiled w/ MEMORY queues
	g_tw_memory_nqueues=1;
	// set a min lookahead of 1.0
	lookahead = 1.0;
	//tw_opt_add(app_opt);
	tw_init(&argc, &argv);

	if( lookahead > 1.0 )
		tw_error(TW_LOC, "Lookahead > 1.0 .. needs to be less\n");

	//reset mean based on lookahead
	mean = mean - lookahead;

	g_tw_memory_nqueues = 16; // give at least 16 memory queue events

	offset_lpid = g_tw_mynode * nlp_per_pe;
	ttl_lps = tw_nnodes() * g_tw_npe * nlp_per_pe;
	//g_tw_rng_default = TW_FALSE;
	g_tw_lookahead = lookahead;

	nlp_per_pe = (NUM_CELLS_X * NUM_CELLS_Y) / (tw_nnodes() * g_tw_npe);
	g_tw_events_per_pe = (mult * nlp_per_pe * g_traffic_start_events) +
		optimistic_memory;
	num_cells_per_kp = (NUM_CELLS_X * NUM_CELLS_Y) / (NUM_VP_X * NUM_VP_Y);
	vp_per_proc = (NUM_VP_X * NUM_VP_Y) / ((tw_nnodes() * g_tw_npe)) ;
	g_vp_per_proc = vp_per_proc;
	g_tw_nlp = nlp_per_pe;
	g_tw_nkp = vp_per_proc;

	g_tw_mapping = CUSTOM;
	g_tw_custom_initial_mapping = &traffic_grid_mapping;
	g_tw_custom_lp_global_to_local_map = &CellMapping_to_lp;

	tw_define_lps(nlp_per_pe, sizeof(Msg_Data));

	for(i = 0; i < g_tw_nlp; i++)
		tw_lp_settype(i, &mylps[0]);

	if( g_tw_mynode == 0 )
	{
		printf("========================================\n");
		printf("Traffice Model Configuration..............\n");
		printf("   Lookahead..............%lf\n", lookahead);
		printf("   Start-events...........%u\n", g_traffic_start_events);
		printf("   stagger................%u\n", stagger);
		printf("   Mean...................%lf\n", mean);
		printf("   Mult...................%lf\n", mult);
		printf("   Memory.................%u\n", optimistic_memory);
		printf("   Remote.................%lf\n", percent_remote);
		printf("========================================\n\n");
	}

	tw_run();
	tw_end();

	printf("Number of Arivals: %lld\n", totalCars);
	printf("Number of Cars reached their dest: %lld\n", carsFinished);

	return 0;
}
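
The traffic model in Example #21 sizes LPs and KPs by evenly dividing the cell grid over PEs and virtual processors. A worked sizing sketch under assumed values (none of these constants come from the model):

/* Assume NUM_CELLS_X = NUM_CELLS_Y = 1024, NUM_VP_X = NUM_VP_Y = 32,
 * 4 MPI ranks and g_tw_npe = 1:
 *   nlp_per_pe       = (1024 * 1024) / (4 * 1)   = 262144 cell LPs per PE
 *   num_cells_per_kp = (1024 * 1024) / (32 * 32) = 1024 cells per KP
 *   vp_per_proc      = (32 * 32) / (4 * 1)       = 256 KPs per PE
 * so each rank ends up with g_tw_nlp = 262144 and g_tw_nkp = 256. */
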
Example #22
int
main(int argc, char **argv, char **env)
{

#ifdef TEST_COMM_ROSS
    // Init outside of ROSS
    MPI_Init(&argc, &argv);
    // Split COMM_WORLD in half even/odd
    int mpi_rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
    MPI_Comm split_comm;
    MPI_Comm_split(MPI_COMM_WORLD, mpi_rank%2, mpi_rank, &split_comm);
    if(mpi_rank%2 == 1){
        // tests should catch any MPI_COMM_WORLD collectives
        MPI_Finalize();
    }
    // Allows ROSS to function as normal
    tw_comm_set(split_comm);
#endif

	int		 i;

        // get rid of error if compiled w/ MEMORY queues
        g_tw_memory_nqueues=1;
	// set a min lookahead of 1.0
	lookahead = 1.0;
	tw_opt_add(app_opt);
	tw_init(&argc, &argv);

#ifdef USE_DAMARIS
    if(g_st_ross_rank)
    { // only ross ranks should run code between here and tw_run()
#endif
	if( lookahead > 1.0 )
	  tw_error(TW_LOC, "Lookahead > 1.0 .. needs to be less\n");

	//reset mean based on lookahead
        mean = mean - lookahead;

        g_tw_memory_nqueues = 16; // give at least 16 memory queue events

	offset_lpid = g_tw_mynode * nlp_per_pe;
	ttl_lps = tw_nnodes() * g_tw_npe * nlp_per_pe;
	g_tw_events_per_pe = (mult * nlp_per_pe * g_phold_start_events) +
				optimistic_memory;
	//g_tw_rng_default = TW_FALSE;
	g_tw_lookahead = lookahead;

	tw_define_lps(nlp_per_pe, sizeof(phold_message));

	for(i = 0; i < g_tw_nlp; i++)
    {
		tw_lp_settype(i, &mylps[0]);
        st_model_settype(i, &model_types[0]);
    }

        if( g_tw_mynode == 0 )
	  {
	    printf("========================================\n");
	    printf("PHOLD Model Configuration..............\n");
	    printf("   Lookahead..............%lf\n", lookahead);
	    printf("   Start-events...........%u\n", g_phold_start_events);
	    printf("   stagger................%u\n", stagger);
	    printf("   Mean...................%lf\n", mean);
	    printf("   Mult...................%lf\n", mult);
	    printf("   Memory.................%u\n", optimistic_memory);
	    printf("   Remote.................%lf\n", percent_remote);
	    printf("========================================\n\n");
	  }

	tw_run();
#ifdef USE_DAMARIS
    } // end if(g_st_ross_rank)
#endif
	tw_end();

	return 0;
}
Example #23
int
main(int argc, char **argv, char **env)
{
	int i;
	num_buf_slots = vc_size / chunk_size;
	tw_opt_add(app_opt);
	tw_init(&argc, &argv);

	if(strcmp(traffic_str, "uniform") == 0)
	    TRAFFIC = UNIFORM_RANDOM;
	else if(strcmp(traffic_str, "nearest") == 0)
	    TRAFFIC = NEAREST_NEIGHBOR;
	else if(strcmp(traffic_str, "diagonal") == 0)
	    TRAFFIC = DIAGONAL;
        else
	   printf("\n Incorrect traffic pattern specified, using %s as default ", traffic_str );

	/* for automatically reducing the channel link bandwidth of a 7-D or a 9-D torus */
	link_bandwidth = (link_bandwidth * 10) / (2 * N_dims);

        injection_limit = injection_interval / MEAN_INTERVAL;

	for (i=0; i<N_dims; i++)
        {
	  N_nodes*=dim_length[i];
	  N_mpi_procs*=dim_length[i];
	}
	nlp_nodes_per_pe = N_nodes/tw_nnodes()/g_tw_npe;
	nlp_mpi_procs_per_pe = N_mpi_procs/tw_nnodes()/g_tw_npe;

	num_rows = sqrt(N_nodes);
	num_cols = num_rows;

	total_lps = g_tw_nlp * tw_nnodes();
	node_rem = N_nodes % (tw_nnodes()/g_tw_npe);

	if(g_tw_mynode < node_rem)
	   {
		nlp_nodes_per_pe++;
		nlp_mpi_procs_per_pe++;
	   }
        num_packets=1;
        num_chunks = PACKET_SIZE/chunk_size;

        if( mpi_message_size > PACKET_SIZE)
         {
          num_packets = mpi_message_size / PACKET_SIZE;  
          
	  if(mpi_message_size % PACKET_SIZE != 0 )
	    num_packets++;
         }

	g_tw_mapping=CUSTOM;
     	g_tw_custom_initial_mapping=&torus_mapping;
        g_tw_custom_lp_global_to_local_map=&torus_mapping_to_lp;

	g_tw_events_per_pe = mem_factor * 1024 * (nlp_nodes_per_pe/g_tw_npe + nlp_mpi_procs_per_pe/g_tw_npe) + opt_mem;
	tw_define_lps(nlp_nodes_per_pe + nlp_mpi_procs_per_pe, sizeof(nodes_message), 0);

	head_delay = (1 / link_bandwidth) * chunk_size;
	
        // BG/L torus network paper: Tokens are 32 byte chunks that is why the credit delay is adjusted according to bandwidth * 32
	credit_delay = (1 / link_bandwidth) * 8;
	packet_offset = (g_tw_ts_end/MEAN_INTERVAL) * num_packets; 
	
	if(tw_ismaster())
	{
		printf("\nTorus Network Model Statistics:\n");
		printf("Number of nodes: %d Torus dimensions: %d ", N_nodes, N_dims);
		printf(" Link Bandwidth: %f Traffic pattern: %s \n", link_bandwidth, traffic_str );
	}

	tw_run();
	unsigned long long total_finished_storage[N_COLLECT_POINTS];
	unsigned long long total_generated_storage[N_COLLECT_POINTS];
	unsigned long long total_num_hops[N_COLLECT_POINTS];
	unsigned long long total_queue_depth[N_COLLECT_POINTS];
	unsigned long long wait_length,event_length,N_total_packets_finish, N_total_msgs_finish, N_total_hop;
	tw_stime total_time_sum,g_max_latency;

	for( i=0; i<N_COLLECT_POINTS; i++ )
	  {
	    MPI_Reduce( &N_finished_storage[i], &total_finished_storage[i],1,
                        MPI_LONG_LONG, MPI_SUM, 0, MPI_COMM_WORLD);
	    MPI_Reduce( &N_generated_storage[i], &total_generated_storage[i],1,
                        MPI_LONG_LONG, MPI_SUM, 0, MPI_COMM_WORLD);
	    MPI_Reduce( &N_num_hops[i], &total_num_hops[i],1,
			MPI_LONG_LONG, MPI_SUM, 0, MPI_COMM_WORLD);
	    MPI_Reduce( &N_queue_depth[i], &total_queue_depth[i],1,
		         MPI_LONG_LONG, MPI_SUM, 0, MPI_COMM_WORLD);
	  }

	MPI_Reduce( &total_time, &total_time_sum,1, 
		    MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
	MPI_Reduce( &N_finished_packets, &N_total_packets_finish,1, 
		    MPI_LONG_LONG, MPI_SUM, 0, MPI_COMM_WORLD);
        MPI_Reduce( &N_finished_msgs, &N_total_msgs_finish,1, 
                    MPI_LONG_LONG, MPI_SUM, 0, MPI_COMM_WORLD);

	MPI_Reduce( &total_hops, &N_total_hop,1, 
		    MPI_LONG_LONG, MPI_SUM, 0, MPI_COMM_WORLD);
	MPI_Reduce( &max_latency, &g_max_latency,1, 
		    MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD);

	for( i=1; i<N_COLLECT_POINTS; i++ )
	  {
	    total_finished_storage[i]+=total_finished_storage[i-1];
	    total_generated_storage[i]+=total_generated_storage[i-1];
	  }

//	if( total_lp_time > 0 )
//	   total_lp_time /= num_mpi_msgs;

	if(tw_ismaster())
	  {
	    printf("\n ****************** \n");
	    printf("\n total packets finished:         %lld and %lld; \n",
		   total_finished_storage[N_COLLECT_POINTS-1],N_total_packets_finish);
	    printf("\n total MPI messages finished:         %lld; \n",
                   N_total_msgs_finish);
	    printf("\n total generate:       %lld; \n",
		   total_generated_storage[N_COLLECT_POINTS-1]);
	    printf("\n total hops:           %lf; \n",
		   (double)N_total_hop/N_total_packets_finish);
	    printf("\n average travel time:  %lf; \n\n",
		   total_time_sum/N_total_msgs_finish);
#if REPORT_BANDWIDTH	    
	    for( i=0; i<N_COLLECT_POINTS; i++ )
	      {
		printf(" %d ",i*100/N_COLLECT_POINTS);
		printf("total finish: %lld; generate: %lld; alive: %lld \n ",
		       total_finished_storage[i],
		       total_generated_storage[i],
		       total_generated_storage[i]-total_finished_storage[i]);
	      }

	    // capture the steady state statistics
        tw_stime bandwidth;
        tw_stime interval = (g_tw_ts_end / N_COLLECT_POINTS);
	interval = interval/(1000.0 * 1000.0 * 1000.0); //convert ns to seconds
        for( i=1; i<N_COLLECT_POINTS; i++ )
           {
                bandwidth = total_finished_storage[i] - total_finished_storage[i - 1];
		unsigned long long avg_hops = total_num_hops[i]/bandwidth;
                bandwidth = (bandwidth * PACKET_SIZE) / (1024.0 * 1024.0 * 1024.0); // convert bytes to GB
                bandwidth = bandwidth / interval; 
                printf("\n Interval %0.7lf Bandwidth %lf avg hops %lld queue depth %lld ", interval, bandwidth, avg_hops, (unsigned long long)total_queue_depth[i]/num_chunks);
           }

	    unsigned long long steady_sum=0;
	    for( i = N_COLLECT_POINTS/2; i<N_COLLECT_POINTS;i++)
	      steady_sum+=total_generated_storage[i]-total_finished_storage[i];
	    printf("\n Steady state, packet alive: %lld\n",
		   2*steady_sum/N_COLLECT_POINTS);
#endif
	    printf("\nMax latency is %lf\n\n",g_max_latency);
	}
//	  if(packet_sent > 0 || credit_sent > 0)
//	    printf("\n Packet sent are %d, credit sent %d ", packet_sent, credit_sent);
	tw_end();
	return 0;
}
Example #24
int
main(int argc, char **argv, char **env)
{
	int i;
	tw_opt_add(app_opt);
	tw_init(&argc, &argv);

	for (i=0; i<N_dims; i++)
	  N_nodes*=dim_length[i];

	MEAN_INTERVAL = N_nodes/ARRIVAL_RATE;

	nlp_per_pe = N_nodes/tw_nnodes()/g_tw_npe;
	g_tw_events_per_pe = nlp_per_pe/g_tw_npe + opt_mem;
	tw_define_lps(nlp_per_pe, sizeof(nodes_message), 0);

	for(i = 0; i < g_tw_nlp; i++)
	  tw_lp_settype(i, &nodes_lps[0]);

	tw_run();

	if(tw_ismaster())
	{
		printf("\nTorus Network Model Statistics:\n");

		printf("\t%-50s %11lld\n", "Number of nodes", 
			nlp_per_pe * g_tw_npe * tw_nnodes());
	}

	unsigned long long total_finished_storage[N_COLLECT_POINTS];
	unsigned long long total_dropped_storage[N_COLLECT_POINTS];
	unsigned long long total_generated_storage[N_COLLECT_POINTS];
	unsigned long long wait_length,event_length,N_total_finish,N_total_hop;
	tw_stime total_time_sum,g_max_latency;

	for( i=0; i<N_COLLECT_POINTS; i++ )
	  {
	    MPI_Reduce( &N_dropped_storage[i], &total_dropped_storage[i],1, 
			MPI_LONG_LONG, MPI_SUM, 0, MPI_COMM_WORLD);
	    MPI_Reduce( &N_finished_storage[i], &total_finished_storage[i],1,
                        MPI_LONG_LONG, MPI_SUM, 0, MPI_COMM_WORLD);
	    MPI_Reduce( &N_generated_storage[i], &total_generated_storage[i],1,
                        MPI_LONG_LONG, MPI_SUM, 0, MPI_COMM_WORLD);
	  }

	MPI_Reduce( &queueing_times_sum, &event_length,1, 
		    MPI_LONG_LONG, MPI_SUM, 0, MPI_COMM_WORLD);
	MPI_Reduce( &total_queue_length, &wait_length,1, 
		    MPI_LONG_LONG, MPI_SUM, 0, MPI_COMM_WORLD);
	MPI_Reduce( &total_time, &total_time_sum,1, 
		    MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
	MPI_Reduce( &N_finished, &N_total_finish,1, 
		    MPI_LONG_LONG, MPI_SUM, 0, MPI_COMM_WORLD);
	MPI_Reduce( &total_hops, &N_total_hop,1, 
		    MPI_LONG_LONG, MPI_SUM, 0, MPI_COMM_WORLD);
	MPI_Reduce( &max_latency, &g_max_latency,1, 
		    MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD);

	unsigned long long total_rand_total;
	MPI_Reduce( &rand_total, &total_rand_total,1, 
		    MPI_LONG_LONG, MPI_SUM, 0, MPI_COMM_WORLD);

	for( i=1; i<N_COLLECT_POINTS; i++ )
	  {
	    total_dropped_storage[i]+=total_dropped_storage[i-1];
	    total_finished_storage[i]+=total_finished_storage[i-1];
	    total_generated_storage[i]+=total_generated_storage[i-1];
	  }

	if(tw_ismaster())
	  {
	    printf("\n ****************** \n");
	    printf("\n Total drop:           %lld; \n",
		   total_dropped_storage[N_COLLECT_POINTS-1]);
	    printf("\n total finish:         %lld and %lld; \n",
		   total_finished_storage[N_COLLECT_POINTS-1],N_total_finish);
	    printf("\n total generate:       %lld; \n",
		   total_generated_storage[N_COLLECT_POINTS-1]);
	    printf("\n total hops:           %lf; \n",
		   (double)N_total_hop/total_finished_storage[N_COLLECT_POINTS-1]);
	    printf("\n total wait length:    %lf; \n",
		   (double)wait_length/total_finished_storage[N_COLLECT_POINTS-1]);
	    printf("\n total total queued:   %lf; \n",
		   (double)event_length/total_finished_storage[N_COLLECT_POINTS-1]);
	    printf("\n average travel time:  %lf; \n\n",
		   total_time_sum/total_finished_storage[N_COLLECT_POINTS-1]);
	    
	    for( i=0; i<N_COLLECT_POINTS; i++ )
	      {
		printf(" %d ",i*100/N_COLLECT_POINTS);
		printf("drop: %lld; finish: %lld; generate: %lld; alive: %lld\n",
		       total_dropped_storage[i],
		       total_finished_storage[i],
		       total_generated_storage[i],
		       total_generated_storage[i]-total_finished_storage[i]);
		       
	      }

	    // capture the steady state statistics
	    unsigned long long steady_sum=0;
	    for( i = N_COLLECT_POINTS/2; i<N_COLLECT_POINTS;i++)
	      steady_sum+=total_generated_storage[i]-total_finished_storage[i];
	    printf("\n Steady state, packet alive: %lld\n",
		   2*steady_sum/N_COLLECT_POINTS);
	    printf("Aeverage is %lld\n",total_rand_total/total_generated_storage[N_COLLECT_POINTS-1]);
	    printf("\nMax latency is %lf\n\n",g_max_latency);

	  }

	tw_end();
	return 0;
}
Example #25
int main(int argc, char *argv[]) {
	//printf("In main\n");
	g_tw_ts_end = s_to_ns(60 * 60 * 24 * 365); /* one year, in nsecs */

	/* ROSS initialization function calls */
	tw_opt_add(app_opt); /* add user-defined args */
	/* initialize ROSS and parse args. NOTE: tw_init calls MPI_Init */
	tw_init(&argc, &argv);

	if (!conf_file_name[0]) {
		tw_error(TW_LOC,
				"Expected \"codes-config\" option, please see --help.\n");
		return 1;
	}

	/* loading the config file into the codes-mapping utility, giving us the
	 * parsed config object in return.
	 * "config" is a global var defined by codes-mapping */
	if (configuration_load(conf_file_name, MPI_COMM_WORLD, &config)) {
		tw_error(TW_LOC, "Error loading config file %s.\n", conf_file_name);
		return 1;
	}
	lsm_register();
	//lsm_configure();
	/* register model-net LPs with ROSS */
	model_net_register();

	/* register the user LPs */
	node_register();
	forwarder_register();

	/* setup the LPs and codes_mapping structures */
	codes_mapping_setup();

	/* setup the globals */
	int rc = configuration_get_value_int(&config, "server_pings", "num_reqs",
			NULL, &num_reqs);
	if (rc != 0)
		tw_error(TW_LOC, "unable to read server_pings:num_reqs");
	int payload_sz_d;
	rc = configuration_get_value_int(&config, "server_pings", "payload_sz",
			NULL, &payload_sz_d);
	if (rc != 0)
		tw_error(TW_LOC, "unable to read server_pings:payload_sz");
	payload_sz = (uint64_t) payload_sz_d;

	/* get the counts for the client and svr clusters */
	num_client_nodes = codes_mapping_get_lp_count("client_CLUSTER", 0, "node",
			NULL, 1);
	num_svr_nodes = codes_mapping_get_lp_count("svr_CLUSTER", 0, "node", NULL,
			1);
	num_burst_buffer_nodes = codes_mapping_get_lp_count("bb_CLUSTER", 0, "node",
			NULL, 1);
	num_storage_nodes = codes_mapping_get_lp_count("storage_CLUSTER", 0, "node",
			NULL, 1);
	num_client_forwarders = codes_mapping_get_lp_count("client_FORWARDERS", 0,
			"forwarder", NULL, 1);
	num_svr_forwarders = codes_mapping_get_lp_count("svr_FORWARDERS", 0,
			"forwarder", NULL, 1);
	num_burst_buffer_forwarders = codes_mapping_get_lp_count("bb_FORWARDERS", 0,
			"forwarder", NULL, 1);
	num_storage_forwarders = codes_mapping_get_lp_count("storage_FORWARDERS", 0,
			"forwarder", NULL, 1);

	/* Setup the model-net parameters specified in the global config object,
	 * returned are the identifier(s) for the network type.
	 * 1 ID  -> all the same modelnet model
	 * 2 IDs -> clusters are the first id, forwarding network the second
	 * 3 IDs -> client is first, svr and bb second and forwarding network the third
	 * 4 IDs -> cluster client is the first, svr is the second, burst buffer the third and forwarding network the last
	 *          */
	int num_nets;
	int *net_ids = model_net_configure(&num_nets);
	assert(num_nets <= 5);
	if (num_nets == 1) {
		net_id_client = net_ids[0];
		net_id_svr = net_ids[0];
		net_id_bb = net_ids[0];
		net_id_storage = net_ids[0];
		net_id_forwarding = net_ids[0];
	} else if (num_nets == 2) {
		net_id_client = net_ids[0];
		net_id_svr = net_ids[0];
		net_id_bb = net_ids[0];
		net_id_storage = net_ids[0];
		net_id_forwarding = net_ids[1];
	} else if (num_nets == 3) {
		net_id_client = net_ids[0];
		net_id_svr = net_ids[1];
		net_id_bb = net_ids[1];
		net_id_storage = net_ids[1];
		net_id_forwarding = net_ids[2];
	} else if (num_nets == 4) {
		net_id_client = net_ids[0];
		net_id_svr = net_ids[1];
		net_id_bb = net_ids[2];
		net_id_storage = net_ids[2];
		net_id_forwarding = net_ids[3];
	} else {
		net_id_client = net_ids[0];
		net_id_svr = net_ids[1];
		net_id_bb = net_ids[2];
		net_id_storage = net_ids[3];
		net_id_forwarding = net_ids[4];
	}
	free(net_ids);

	configuration_get_value_int(&config, param_group_nm, num_reqs_key, NULL,
			&num_reqs);
	configuration_get_value_int(&config, param_group_nm, payload_sz_key, NULL,
			(int *) &payload_sz);
	configuration_get_value_int(&config, param_group_nm, pvfs_file_sz_key, NULL,
			&pvfs_file_sz); /*Sughosh: added for pvfsfs*/
	configuration_get_value_int(&config, param_group_nm, bb_file_size_key, NULL,
			&bb_file_sz); /*Tony: added for bb*/
	configuration_get_value_int(&config, param_group_nm, bb_capacity_key, NULL,
			&burst_buffer_max_capacity); /*Tony: added for bb*/

	/* begin simulation */
	tw_run();

	model_net_report_stats(net_id);

	tw_end();

	return 0;
}
Example #26
int main( int argc, char** argv )
{
  int rank, nprocs;
  int num_nets;
  int* net_ids;

  g_tw_ts_end = s_to_ns(60*60*24*365); /* one year, in nsecs */

  workload_type[0]='\0';
  tw_opt_add(app_opt);
  tw_init(&argc, &argv);

  if(strlen(workload_file) == 0)
    {
	if(tw_ismaster())
		printf("\n Usage: mpirun -np n ./codes-nw-test --sync=1/2/3 --workload_type=type --workload_file=workload-file-name");
	tw_end();
	return -1;
    }

    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);

   configuration_load(argv[2], MPI_COMM_WORLD, &config);

   nw_add_lp_type();

   codes_mapping_setup();

   num_net_lps = codes_mapping_get_lp_count("MODELNET_GRP", 0, "nw-lp", NULL, 0);
   
    tw_run();

    long long total_bytes_sent, total_bytes_recvd;
    double avg_run_time;
    double avg_comm_run_time;
    double avg_col_run_time;
    double total_avg_send_time;
    double total_avg_wait_time;
    double total_avg_recv_time;
    double total_avg_col_time;
    double total_avg_comp_time;
    long overall_sends, overall_recvs, overall_waits, overall_cols;
	
    MPI_Reduce(&num_bytes_sent, &total_bytes_sent, 1, MPI_LONG_LONG, MPI_SUM, 0, MPI_COMM_WORLD);
    MPI_Reduce(&num_bytes_recvd, &total_bytes_recvd, 1, MPI_LONG_LONG, MPI_SUM, 0, MPI_COMM_WORLD);
   MPI_Reduce(&avg_time, &avg_run_time, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);

   MPI_Reduce(&avg_recv_time, &total_avg_recv_time, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
   MPI_Reduce(&avg_comm_time, &avg_comm_run_time, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
   MPI_Reduce(&avg_col_time, &avg_col_run_time, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
  MPI_Reduce(&avg_wait_time, &total_avg_wait_time, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
   MPI_Reduce(&avg_send_time, &total_avg_send_time, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
   MPI_Reduce(&avg_compute_time, &total_avg_comp_time, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
   MPI_Reduce(&total_sends, &overall_sends, 1, MPI_LONG, MPI_SUM, 0, MPI_COMM_WORLD);
   MPI_Reduce(&total_recvs, &overall_recvs, 1, MPI_LONG, MPI_SUM, 0, MPI_COMM_WORLD);
   MPI_Reduce(&total_waits, &overall_waits, 1, MPI_LONG, MPI_SUM, 0, MPI_COMM_WORLD);
   MPI_Reduce(&total_collectives, &overall_cols, 1, MPI_LONG, MPI_SUM, 0, MPI_COMM_WORLD);

   if(!g_tw_mynode)
	printf("\n Total bytes sent %lld recvd %lld \n avg runtime %lf \n avg comm time %lf avg compute time %lf \n avg collective time %lf avg send time %lf \n avg recv time %lf \n avg wait time %lf \n total sends %ld total recvs %ld total waits %ld total collectives %ld ", total_bytes_sent, total_bytes_recvd, 
			avg_run_time/num_net_lps,
			avg_comm_run_time/num_net_lps,
			total_avg_comp_time/num_net_lps,
			avg_col_run_time/num_net_lps,
			total_avg_send_time/num_net_lps,
			total_avg_recv_time/num_net_lps,
			total_avg_wait_time/num_net_lps,
			overall_sends, overall_recvs, overall_waits, overall_cols);
   tw_end();
  
  return 0;
}
Example #27
int olsr_main(int argc, char *argv[])
{
    int i;
    char log[32];

    tw_opt_add(olsr_opts);
    tw_init(&argc, &argv);

#if DEBUG
    sprintf( log, "olsr-log.%ld", g_tw_mynode );

    olsr_event_log = fopen( log, "w+");
    if( olsr_event_log == NULL )
        tw_error( TW_LOC, "Failed to Open OLSR Event Log file \n");
#endif

    g_tw_mapping = CUSTOM;
    g_tw_custom_initial_mapping = &olsr_custom_mapping;
    g_tw_custom_lp_global_to_local_map = &olsr_mapping_to_lp;

    // nlp_per_pe = OLSR_MAX_NEIGHBORS;// / tw_nnodes();
    //g_tw_lookahead = SA_INTERVAL;

    SA_range_start = nlp_per_pe;

    // Increase nlp_per_pe by nlp_per_pe / OMN
    nlp_per_pe += nlp_per_pe / OLSR_MAX_NEIGHBORS;

    g_tw_events_per_pe =  OLSR_MAX_NEIGHBORS / 2 * nlp_per_pe  + 65536;
    tw_define_lps(nlp_per_pe, sizeof(olsr_msg_data));

    for(i = 0; i < OLSR_END_EVENT; i++)
        g_olsr_event_stats[i] = 0;

    for (i = 0; i < SA_range_start; i++) {
        tw_lp_settype(i, &olsr_lps[0]);
    }

    for (i = SA_range_start; i < nlp_per_pe; i++) {
        tw_lp_settype(i, &olsr_lps[1]);
    }

#if DEBUG
    printf("g_tw_nlp is %llu\n", g_tw_nlp);
#endif

    tw_run();

    if( g_tw_synchronization_protocol != 1 )
    {
        MPI_Reduce( g_olsr_event_stats, g_olsr_root_event_stats, OLSR_END_EVENT, MPI_LONG_LONG, MPI_SUM, 0, MPI_COMM_WORLD);
    }
    else {
        for (i = 0; i < OLSR_END_EVENT; i++) {
            g_olsr_root_event_stats[i] = g_olsr_event_stats[i];
        }
    }

    if (tw_ismaster()) {
        for( i = 0; i < OLSR_END_EVENT; i++ )
            printf("OLSR Type %s Event Count = %llu \n", event_names[i], g_olsr_root_event_stats[i]);
        printf("Complete.\n");
    }

    tw_end();

    return 0;
}
Example #28
	/*
	 * This function provides the Reactive Model with a post-simulation 'main'
	 */
void
rm_run(char ** argv)
{
	FILE	*F;

	int	 i;

	// if not master, just call tw_run and return
	if(!tw_node_eq(&g_tw_mynode, &g_tw_masternode))
	{
		tw_run();
		return;
	}

#if RM_LOG_STATS
	F = fopen("reactive_model.log", "w");

	if(!F)
		tw_error(TW_LOC, "Unable to open Reactive Model logfile!");
#else
	F = stdout;
#endif

	fprintf(F, "\nReactive Model Parameters: \n");
	fprintf(F, "\n");
	fprintf(F, "\t%-50s\n", "Spatial Parameters:");
	fprintf(F, "\n");
	fprintf(F, "\t\t%-50s %11.4lf\n", "Spatial Coefficient", g_rm_spatial_coeff);
	fprintf(F, "\n");
	fprintf(F, "\t\t%-50s %11dD\n", "Dimensions Computed", g_rm_spatial_dim);

	for(i = 0; i < g_rm_spatial_dim; i++)
	{
		fprintf(F, "\t\t%-50s %11d %dD\n", "Cells per Dimension", 
						g_rm_spatial_grid[i], i+1);
		fprintf(F, "\t\t%-50s %11.4lf %dD\n", "Cell Spacing ", 
						g_rm_spatial_d[i], i+1);
	}

	fprintf(F, "\n");
	fprintf(F, "\t%-50s\n", "Temporal Parameters:");
	fprintf(F, "\n");
	fprintf(F, "\t\t%-50s %11.11lf\n", "Scatter Offset TS", g_rm_scatter_ts);
	fprintf(F, "\t\t%-50s %11.4lf\n", "Loss Coefficient", g_rm_wave_loss_coeff);
	fprintf(F, "\t\t%-50s %11.4lf\n", "Velocity", g_rm_wave_velocity);

	for(i = 0; i < g_rm_spatial_dim; i++)
		fprintf(F, "\t\t%-50s %11.11lf %dD\n", "Timestep (d/V)", 
					g_rm_spatial_offset_ts[i], i+1);
	fprintf(F, "\t\t%-50s %11.4lf\n", "Amplitude Threshold", g_rm_wave_threshold);

	tw_run();

	fprintf(F, "\nReactive Model Statistics: \n");
#if SAVE_MEM
	fprintf(F, "\tRange Statistics: \n");
	fprintf(F, "\t%-50s %11ld\n", "Range Particles Received", 
						g_rm_stats->s_nparticles);
#endif
	fprintf(F, "\tWave Statistics: \n\n");
	fprintf(F, "\t%-50s %11lld\n", "Initiate", 
						g_rm_stats->s_ncell_initiate);
	fprintf(F, "\t%-50s %11lld\n", "Scatters", 
						g_rm_stats->s_ncell_scatter);
	fprintf(F, "\t%-50s %11lld\n", "Gathers", 
						g_rm_stats->s_ncell_gather);

	//fclose(g_rm_waves_plt_f);
	//fclose(g_rm_nodes_plt_f);
	//fclose(g_rm_parts_plt_f);
}