Example #1
int TRACE_start()
{
  if (TRACE_is_configured())
    TRACE_getopts();

  // tracing system must be:
  //    - enabled (with --cfg=tracing:yes)
  //    - already configured (TRACE_global_init already called)
  if (TRACE_is_enabled()) {

    XBT_DEBUG("Tracing starts");

    /* init the tracing module to generate the right output */
    /* open internal buffer */
    TRACE_init();

    /* open the trace file(s) */
    const char* format = sg_cfg_get_string(OPT_TRACING_FORMAT);
    XBT_DEBUG("Tracing format %s\n", format);
    if(!strcmp(format, "Paje")){
      TRACE_paje_init();
      TRACE_paje_start();
    }else if (!strcmp(format, "TI")){
      TRACE_TI_init();
      TRACE_TI_start();
    }else{
      xbt_die("Unknown trace format :%s ", format);
    }

    /* activate trace */
    if (trace_active == 1) {
      THROWF(tracing_error, 0, "Tracing is already active");
    }
    trace_active = 1;
    XBT_DEBUG("Tracing is on");

    /* other trace initialization */
    created_categories = xbt_dict_new_homogeneous(xbt_free_f);
    declared_marks = xbt_dict_new_homogeneous(xbt_free_f);
    user_host_variables = xbt_dict_new_homogeneous(xbt_free_f);
    user_vm_variables = xbt_dict_new_homogeneous(xbt_free_f);
    user_link_variables = xbt_dict_new_homogeneous(xbt_free_f);

    /* run every callback registered to execute when tracing starts */
    if (TRACE_start_functions != NULL) {
      void (*func) ();
      unsigned int iter = xbt_dynar_length(TRACE_start_functions);
      xbt_dynar_foreach(TRACE_start_functions, iter, func) {
        func();
      }
    }
  }

  return 0;
}
Example #2
NetworkIBModel::NetworkIBModel()
 : NetworkSmpiModel() {
  m_haveGap = false;
  active_nodes = NULL;

  const char* IB_factors_string = sg_cfg_get_string("smpi/IB_penalty_factors");
  xbt_dynar_t radical_elements = xbt_str_split(IB_factors_string, ";");
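  // exactly three semicolon-separated factors are expected: Be, Bs and ys (the IB penalty factors)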
  
  if (xbt_dynar_length(radical_elements) != 3)
    surf_parse_error("smpi/IB_penalty_factors should be provided and contain 3 semicolon-separated elements, for example: 0.965;0.925;1.35");
  
  Be = atof(xbt_dynar_get_as(radical_elements, 0, char *));
  Bs = atof(xbt_dynar_get_as(radical_elements, 1, char *));
  ys = atof(xbt_dynar_get_as(radical_elements, 2, char *));

  xbt_dynar_free(&radical_elements); // release the temporary split result
}
Example #3
int find_coll_description(s_mpi_coll_description_t * table,
                           char *name, const char *desc)
{
  int i;
  char *name_list = NULL;
  int selector_on=0;
  if (name == NULL) { // no argument provided, use the active selector's algorithm
    name = (char*)sg_cfg_get_string("smpi/coll_selector");
    selector_on = 1;
  }
  for (i = 0; table[i].name; i++)
    if (!strcmp(name, table[i].name)) {
      if (strcmp(table[i].name,"default"))
        XBT_INFO("Switch to algorithm %s for collective %s",table[i].name,desc);
      return i;
    }

  if (selector_on) {
    // the collective is not handled by the active selector, fall back to the default one
    name = (char*)"default";
    for (i = 0; table[i].name; i++)
      if (!strcmp(name, table[i].name)) {
        return i;
      }
  }
  if (!table[0].name)
    xbt_die("No collective is valid for '%s'! This is a bug.",name);
  name_list = xbt_strdup(table[0].name);
  for (i = 1; table[i].name; i++) {
    name_list =
        xbt_realloc(name_list,
                    strlen(name_list) + strlen(table[i].name) + 3);
    strcat(name_list, ", ");
    strcat(name_list, table[i].name);
  }
  xbt_die("Collective '%s' is invalid! Valid collectives are: %s.", name, name_list);
  return -1;
}
Example #4
static void smpi_init_options(){
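    // resolve the implementation of each MPI collective from its "smpi/<collective>" option
    // and store it in the matching function pointer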
    int gather_id = find_coll_description(mpi_coll_gather_description,
                                          sg_cfg_get_string("smpi/gather"), "gather");
    mpi_coll_gather_fun = (int (*)(void *, int, MPI_Datatype,
                                   void *, int, MPI_Datatype, int, MPI_Comm))
        mpi_coll_gather_description[gather_id].coll;

    int allgather_id = find_coll_description(mpi_coll_allgather_description,
                                             sg_cfg_get_string("smpi/allgather"),"allgather");
    mpi_coll_allgather_fun = (int (*)(void *, int, MPI_Datatype,
                                      void *, int, MPI_Datatype, MPI_Comm))
        mpi_coll_allgather_description[allgather_id].coll;

    int allgatherv_id = find_coll_description(mpi_coll_allgatherv_description,
                                              sg_cfg_get_string("smpi/allgatherv"),"allgatherv");
    mpi_coll_allgatherv_fun = (int (*)(void *, int, MPI_Datatype, void *, int *,
                                       int *, MPI_Datatype, MPI_Comm))
        mpi_coll_allgatherv_description[allgatherv_id].coll;

    int allreduce_id = find_coll_description(mpi_coll_allreduce_description,
                                             sg_cfg_get_string("smpi/allreduce"),"allreduce");
    mpi_coll_allreduce_fun = (int (*)(void *sbuf, void *rbuf, int rcount,
                                      MPI_Datatype dtype, MPI_Op op,
                                      MPI_Comm comm))
        mpi_coll_allreduce_description[allreduce_id].coll;

    int alltoall_id = find_coll_description(mpi_coll_alltoall_description,
                                            sg_cfg_get_string("smpi/alltoall"),"alltoall");
    mpi_coll_alltoall_fun = (int (*)(void *, int, MPI_Datatype,
                                     void *, int, MPI_Datatype, MPI_Comm))
        mpi_coll_alltoall_description[alltoall_id].coll;

    int alltoallv_id = find_coll_description(mpi_coll_alltoallv_description,
                                             sg_cfg_get_string("smpi/alltoallv"),"alltoallv");
    mpi_coll_alltoallv_fun = (int (*)(void *, int *, int *, MPI_Datatype,
                                      void *, int *, int *, MPI_Datatype,
                                      MPI_Comm))
        mpi_coll_alltoallv_description[alltoallv_id].coll;

    int bcast_id = find_coll_description(mpi_coll_bcast_description,
                                         sg_cfg_get_string("smpi/bcast"),"bcast");
    mpi_coll_bcast_fun = (int (*)(void *buf, int count, MPI_Datatype datatype,
                                  int root, MPI_Comm com))
        mpi_coll_bcast_description[bcast_id].coll;

    int reduce_id = find_coll_description(mpi_coll_reduce_description,
                                          sg_cfg_get_string("smpi/reduce"),"reduce");
    mpi_coll_reduce_fun = (int (*)(void *buf, void *rbuf, int count,
                                   MPI_Datatype datatype, MPI_Op op,
                                   int root, MPI_Comm comm))
        mpi_coll_reduce_description[reduce_id].coll;

    int reduce_scatter_id =
        find_coll_description(mpi_coll_reduce_scatter_description,
                              sg_cfg_get_string("smpi/reduce_scatter"),"reduce_scatter");
    mpi_coll_reduce_scatter_fun = (int (*)(void *sbuf, void *rbuf, int *rcounts,
                                           MPI_Datatype dtype, MPI_Op op,
                                           MPI_Comm comm))
        mpi_coll_reduce_scatter_description[reduce_scatter_id].coll;

    int scatter_id = find_coll_description(mpi_coll_scatter_description,
                                           sg_cfg_get_string("smpi/scatter"),"scatter");
    mpi_coll_scatter_fun = (int (*)(void *sendbuf, int sendcount,
                                    MPI_Datatype sendtype, void *recvbuf,
                                    int recvcount, MPI_Datatype recvtype,
                                    int root, MPI_Comm comm))
        mpi_coll_scatter_description[scatter_id].coll;

    int barrier_id = find_coll_description(mpi_coll_barrier_description,
                                           sg_cfg_get_string("smpi/barrier"),"barrier");
    mpi_coll_barrier_fun = (int (*)(MPI_Comm comm))
        mpi_coll_barrier_description[barrier_id].coll;

    smpi_cpu_threshold = sg_cfg_get_double("smpi/cpu_threshold");
    smpi_running_power = sg_cfg_get_double("smpi/running_power");
    smpi_privatize_global_variables = sg_cfg_get_boolean("smpi/privatize_global_variables");
    if (smpi_cpu_threshold < 0)
      smpi_cpu_threshold = DBL_MAX;

}