Example #1
/*
 * Estimate the time at which all the input data of a task are available
 * (i.e., have been transferred) on its current allocation. This estimate is
 * the maximum, over the compute parents of the task, of the sum of the
 * parent's estimated finish time and the estimated transfer time of the data
 * this parent sends. For control dependencies, the second term is obviously
 * discarded.
 */
double SD_task_estimate_last_data_arrival_time (SD_task_t task){
  unsigned int i;
  double last_data_arrival = -1., data_availability, estimated_transfer_time;
  SD_task_t parent, grand_parent;
  xbt_dynar_t parents, grand_parents;

  parents = SD_task_get_parents(task);

  xbt_dynar_foreach(parents, i, parent){
    if (SD_task_get_kind(parent) == SD_TASK_COMM_PAR_MXN_1D_BLOCK) {
      grand_parents = SD_task_get_parents(parent);
      xbt_dynar_get_cpy(grand_parents, 0, &grand_parent);
      estimated_transfer_time =
          SD_task_estimate_transfer_time_from(grand_parent, task,
              SD_task_get_amount(parent));
      data_availability = SD_task_get_estimated_finish_time(grand_parent)+
          estimated_transfer_time;
      xbt_dynar_free_container(&grand_parents);
    } else {
      data_availability = SD_task_get_estimated_finish_time(parent);
    }

    if (last_data_arrival < data_availability)
      last_data_arrival = data_availability;
  }
  xbt_dynar_free_container(&parents);
  return last_data_arrival;
}
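A brief usage sketch (an assumed caller, not part of the example above): once a task is ready, this estimator gives the earliest date at which its inputs could all be present on its current allocation.

/* Hypothetical caller: log the earliest possible start date of a ready task.
 * Assumes the helper above and the usual SimDag getters. */
static void report_earliest_start(SD_task_t task)
{
  double data_ready = SD_task_estimate_last_data_arrival_time(task);
  XBT_DEBUG("%s cannot start before t=%f", SD_task_get_name(task), data_ready);
}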
Example #2
/* This function is actually not used by biCPA */
double top_level_recursive_computation(SD_task_t task){
  unsigned int i;
  double max_top_level = 0.0, my_top_level, current_parent_top_level = 0.0;
  SD_task_t parent, grand_parent=NULL;
  xbt_dynar_t parents, grand_parents;

  max_top_level = -1.0;

  if (!strcmp(SD_task_get_name(task),"root")){
    XBT_DEBUG("root's top level is 0.0");
    SD_task_mark(task);
    SD_task_set_top_level(task, 0.0);
    return 0.0;
  }

  parents = SD_task_get_parents(task);

  xbt_dynar_foreach(parents, i, parent){
    if (SD_task_get_kind(parent) == SD_TASK_COMM_PAR_MXN_1D_BLOCK) {
      grand_parents = SD_task_get_parents(parent);
      xbt_dynar_get_cpy(grand_parents, 0, &grand_parent);
      if (SD_task_is_marked(grand_parent)){
        current_parent_top_level = SD_task_get_top_level(grand_parent) +
            SD_task_estimate_execution_time(grand_parent,
                SD_task_get_allocation_size(grand_parent));
      } else {
        current_parent_top_level =
            top_level_recursive_computation(grand_parent) +
            SD_task_estimate_execution_time(grand_parent,
                SD_task_get_allocation_size(grand_parent));
      }
      xbt_dynar_free_container(&grand_parents);
    } else {
      if (SD_task_is_marked(parent)){
        current_parent_top_level = SD_task_get_top_level(parent) +
            SD_task_estimate_execution_time(parent,
                SD_task_get_allocation_size(parent));
      } else {
        current_parent_top_level =
            top_level_recursive_computation(parent) +
            SD_task_estimate_execution_time(parent,
                SD_task_get_allocation_size(parent));
      }
    }

    if (max_top_level < current_parent_top_level)
      max_top_level = current_parent_top_level;
  }

  my_top_level = max_top_level;

  SD_task_set_top_level(task, my_top_level);
  SD_task_mark(task);
  XBT_DEBUG("%s's top level is %f", SD_task_get_name(task), my_top_level);

  xbt_dynar_free_container(&parents);

  return my_top_level;
}
Example #3
double bottom_level_recursive_computation(SD_task_t task){
  unsigned int i;
  double my_bottom_level = 0.0, max_bottom_level,
      current_child_bottom_level = 0.0;
  SD_task_t child, grand_child;
  xbt_dynar_t children, grand_children;

  my_bottom_level = SD_task_estimate_execution_time(task,
      SD_task_get_allocation_size(task));

  max_bottom_level = -1.0;
  if (!strcmp(SD_task_get_name(task),"end")){
    XBT_DEBUG("end's bottom level is 0.0");
    SD_task_mark(task);
    SD_task_set_bottom_level(task, 0.0);
    return 0.0;
  }

  children = SD_task_get_children(task);

  xbt_dynar_foreach(children, i, child){
    if (SD_task_get_kind(child) == SD_TASK_COMM_PAR_MXN_1D_BLOCK) {
      grand_children = SD_task_get_children(child);
      xbt_dynar_get_cpy(grand_children, 0, &grand_child);
      if (SD_task_is_marked(grand_child)){
        current_child_bottom_level = SD_task_get_bottom_level(grand_child);
      } else {
        current_child_bottom_level =
            bottom_level_recursive_computation(grand_child);
      }
      xbt_dynar_free_container(&grand_children);
    } else {
      if (SD_task_is_marked(child)){
        current_child_bottom_level = SD_task_get_bottom_level(child);
      } else {
        current_child_bottom_level =
            bottom_level_recursive_computation(child);
      }
    }

    if (max_bottom_level < current_child_bottom_level)
      max_bottom_level = current_child_bottom_level;
  }

  my_bottom_level += max_bottom_level;

  SD_task_set_bottom_level(task, my_bottom_level);
  SD_task_mark(task);
  XBT_DEBUG("%s's bottom level is %f", SD_task_get_name(task), my_bottom_level);

  xbt_dynar_free_container(&children);
  return my_bottom_level ;
}
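A hedged sketch of how these bottom levels might then be used: ordering compute tasks by decreasing bottom level is a common list-scheduling priority. The comparator, the task array, and the sorting step are illustrative assumptions; qsort comes from <stdlib.h>.

/* Illustration only: order tasks by decreasing bottom level once
 * bottom_level_recursive_computation() has been run on the whole DAG. */
static int compare_bottom_levels(const void *a, const void *b)
{
  double bl_a = SD_task_get_bottom_level(*(SD_task_t *) a);
  double bl_b = SD_task_get_bottom_level(*(SD_task_t *) b);
  return (bl_a < bl_b) - (bl_a > bl_b); /* larger bottom level first */
}

static void sort_by_bottom_level(SD_task_t *tasks, size_t n_tasks)
{
  qsort(tasks, n_tasks, sizeof(SD_task_t), compare_bottom_levels);
}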
Example #4
static void action_finalize(const char *const *action)
{
  process_globals_t globals =
      (process_globals_t) MSG_process_get_data(MSG_process_self());
  if (globals) {
    asynchronous_cleanup();
    xbt_dynar_free_container(&(globals->isends));
    xbt_dynar_free_container(&(globals->irecvs));
    xbt_dynar_free_container(&(globals->tasks));
    xbt_free(globals);
  }
}
Example #5
static void action_finalize(const char *const *action)
{
  smpi_replay_globals_t globals =
      (smpi_replay_globals_t) smpi_process_get_user_data();

  if (globals){
    XBT_DEBUG("There are %lu isends and %lu irecvs in the dynars",
         xbt_dynar_length(globals->isends),xbt_dynar_length(globals->irecvs));
    xbt_dynar_free_container(&(globals->isends));
    xbt_dynar_free_container(&(globals->irecvs));
  }
  free(globals);
}
Example #6
static double finish_on_at(SD_task_t task, sg_host_t host)
{
  double result;
  unsigned int i;
  double data_available = 0.;
  double redist_time = 0;
  double last_data_available;

  xbt_dynar_t parents = SD_task_get_parents(task);

  if (!xbt_dynar_is_empty(parents)) {
    /* compute last_data_available */
    SD_task_t parent;
    last_data_available = -1.0;
    xbt_dynar_foreach(parents, i, parent) {
      /* normal case */
      if (SD_task_get_kind(parent) == SD_TASK_COMM_E2E) {
        xbt_dynar_t grand_parents = SD_task_get_parents(parent);
        SD_task_t grand_parent;

        xbt_assert(xbt_dynar_length(grand_parents) <2, "Error: transfer %s has 2 parents", SD_task_get_name(parent));

        xbt_dynar_get_cpy(grand_parents, 0, &grand_parent);

        sg_host_t * grand_parent_host_list = SD_task_get_workstation_list(grand_parent);
        /* Estimate the redistribution time from this parent */
        if (SD_task_get_amount(parent) <= 1e-6){
          redist_time= 0;
        } else {
          redist_time = SD_route_get_latency(grand_parent_host_list[0], host) +
                        SD_task_get_amount(parent) / SD_route_get_bandwidth(grand_parent_host_list[0], host);
        }
        data_available = SD_task_get_finish_time(grand_parent) + redist_time;

        xbt_dynar_free_container(&grand_parents);
      }

      /* no transfer, control dependency */
      if (SD_task_get_kind(parent) == SD_TASK_COMP_SEQ) {
        data_available = SD_task_get_finish_time(parent);
      }

      if (last_data_available < data_available)
        last_data_available = data_available;
    }

    xbt_dynar_free_container(&parents);

    result = MAX(sg_host_get_available_at(host), last_data_available) + SD_task_get_amount(task)/sg_host_speed(host);
  } else {
    /* No parent: the start date only depends on the host availability.
     * (The example is truncated here; this branch is a sketch mirroring the
     * formula used above.) */
    result = sg_host_get_available_at(host) + SD_task_get_amount(task) / sg_host_speed(host);
    xbt_dynar_free_container(&parents);
  }

  return result;
}
Example #7
/* This function is actually not used by biCPA */
int precedence_level_recursive_computation(SD_task_t task){
  unsigned int i;
  int my_prec_level = -1, current_parent_prec_level = 0;
  SD_task_t parent, grand_parent=NULL;
  xbt_dynar_t parents, grand_parents;

  if (!strcmp(SD_task_get_name(task),"root")){
    XBT_DEBUG("root's precedence level is 0.0");
    SD_task_mark(task);
    SD_task_set_precedence_level(task, 0);
    return 0;
  }

  parents = SD_task_get_parents(task);

  xbt_dynar_foreach(parents, i, parent){
    if (SD_task_get_kind(parent) == SD_TASK_COMM_PAR_MXN_1D_BLOCK) {
      grand_parents = SD_task_get_parents(parent);
      xbt_dynar_get_cpy(grand_parents, 0, &grand_parent);
      if (SD_task_is_marked(grand_parent)){
        current_parent_prec_level =
            SD_task_get_precedence_level(grand_parent) + 1;
      } else {
        current_parent_prec_level =
            precedence_level_recursive_computation(grand_parent) + 1;
      }
      xbt_dynar_free_container(&grand_parents);
    } else {
      if (SD_task_is_marked(parent)){
        current_parent_prec_level =
            SD_task_get_precedence_level(parent) + 1;
      } else {
        current_parent_prec_level =
            precedence_level_recursive_computation(parent) + 1;
      }
    }

    if (my_prec_level < current_parent_prec_level)
      my_prec_level = current_parent_prec_level;
  }

  SD_task_set_precedence_level(task, my_prec_level);
  SD_task_mark(task);
  XBT_DEBUG("%s's precedence level is %d", SD_task_get_name(task),
      my_prec_level);

  xbt_dynar_free_container(&parents);

  return my_prec_level;
}
Example #8
/** @brief Destructor
 *
 * \param dynar poor victim
 *
 * kilkil a dynar and its content
 */
inline void xbt_dynar_free(xbt_dynar_t * dynar)
{
  if (dynar && *dynar) {
    xbt_dynar_reset(*dynar);
    xbt_dynar_free_container(dynar);
  }
}
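For contrast with the destructor above, a self-contained sketch of when each variant applies: xbt_dynar_free() also releases the elements (through the free function given at creation), while xbt_dynar_free_container() releases only the container, which is what the SimDag getters used throughout this page require.

#include "xbt/dynar.h"
#include "xbt/sysdep.h"

/* Illustration only: owning vs. borrowing dynars. */
static void dynar_cleanup_demo(void)
{
  /* Owning dynar: elements are heap strings, freed through xbt_free_ref. */
  xbt_dynar_t owning = xbt_dynar_new(sizeof(char *), xbt_free_ref);
  char *s = xbt_strdup("hello");
  xbt_dynar_push(owning, &s);
  xbt_dynar_free(&owning);              /* frees "hello", then the container */

  /* Borrowing dynar: it merely references objects owned elsewhere (as the
   * dynars returned by SD_task_get_parents() do), so only the container
   * must be released. */
  xbt_dynar_t borrowing = xbt_dynar_new(sizeof(void *), NULL);
  xbt_dynar_free_container(&borrowing); /* frees the container only */
}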
Example #9
int main(int argc, char **argv)
{
  unsigned int ctr, ctr2;
  const SD_workstation_t *workstations;
  int total_nworkstations;
  xbt_dynar_t current_storage_list;
  char *mount_name;

  SD_init(&argc, argv);
  /* Set the workstation model to default, as storage is not supported by the
   * ptask_L07 model yet.
   */
  SD_config("workstation/model", "default");
  SD_create_environment(argv[1]);
  workstations = SD_workstation_get_list();
  total_nworkstations = SD_workstation_get_number();

  for (ctr=0; ctr<total_nworkstations;ctr++){
    current_storage_list = SD_workstation_get_storage_list(workstations[ctr]);
    xbt_dynar_foreach(current_storage_list, ctr2, mount_name)
      XBT_INFO("Workstation '%s' mounts '%s'",
         SD_workstation_get_name(workstations[ctr]), mount_name);
    xbt_dynar_free_container(&current_storage_list);
  }
  SD_exit();
  return 0;
}
Example #10
/* Build the set of compute successors of a task that are ready (i.e., whose parents are all already scheduled). Both
 * data and control dependencies are checked. As more than one transfer may exist between two compute tasks, it is
 * mandatory to check whether the successor is already in the set.
 */
xbt_dynar_t SD_task_get_ready_children(SD_task_t t){
  unsigned int i;
  xbt_dynar_t children=NULL, ready_children;
  xbt_dynar_t output_transfers = SD_task_get_children(t);
  SD_task_t output, child;

  ready_children = xbt_dynar_new(sizeof(SD_task_t), NULL);

  xbt_dynar_foreach(output_transfers, i, output){
    if (SD_task_get_kind(output) == SD_TASK_COMM_E2E) {
      /* Data dependency case: a compute task is followed by a data transfer. Its child (in a scheduling sense) is
       * then the grand child
       */
      children = SD_task_get_children(output);
      xbt_dynar_get_cpy(children, 0, &child);

      /* check if this child is already in the set */
      if (xbt_dynar_member(ready_children, &child)){
        XBT_DEBUG("%s already seen, ignore", SD_task_get_name(child));
        xbt_dynar_free_container(&children); /* avoid memory leaks */
        continue;
      }

      if (SD_task_get_kind(child) == SD_TASK_COMP_SEQ && (SD_task_get_state(child) == SD_NOT_SCHEDULED ||
           SD_task_get_state(child) == SD_SCHEDULABLE) && SD_task_is_ready(child)){
        xbt_dynar_push(ready_children, &child);
      }
      xbt_dynar_free_container(&children); /* avoid memory leaks */
    } else {
      /* Control dependency case: a compute task successor is another compute task. */
      /* check if this child is already in the set */
      if (xbt_dynar_member(ready_children, &output)){
        XBT_DEBUG("%s already seen, ignore", SD_task_get_name(output));
        continue;
      }
      if (SD_task_get_kind(output) == SD_TASK_COMP_SEQ && (SD_task_get_state(output) == SD_NOT_SCHEDULED ||
           SD_task_get_state(output) == SD_SCHEDULABLE)&& SD_task_is_ready(output)){
        xbt_dynar_push(ready_children, &output);
      }
    }
  }

  xbt_dynar_free_container(&output_transfers); /* avoid memory leaks */

  return ready_children;
}
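A hedged sketch of a driver loop built on top of this helper; the one-host placement policy and the queue management are illustrative assumptions, not part of the example.

/* Illustration only: breadth-first list scheduling starting from 'root',
 * placing every compute task on a single host. */
static void schedule_from(SD_task_t root, sg_host_t host)
{
  unsigned int i;
  SD_task_t current, child;
  xbt_dynar_t queue = xbt_dynar_new(sizeof(SD_task_t), NULL);

  xbt_dynar_push(queue, &root);
  while (!xbt_dynar_is_empty(queue)) {
    xbt_dynar_shift(queue, &current);
    SD_task_schedulel(current, 1, host);          /* naive placement policy */

    xbt_dynar_t ready = SD_task_get_ready_children(current);
    xbt_dynar_foreach(ready, i, child)
      xbt_dynar_push(queue, &child);
    xbt_dynar_free_container(&ready);             /* tasks are only borrowed */
  }
  xbt_dynar_free_container(&queue);
}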
Example #11
/*
 * Return an estimate of the earliest time at which a task can start. This
 * time depends on the estimated finish times of the compute ancestors of the
 * task, as set when they were scheduled. Two cases are considered, depending
 * on whether an ancestor is linked to the task through a flow or a control
 * dependency. Flow dependencies require looking at the grandparents of the
 * task, while control dependencies look at the parent tasks directly.
 */
double SD_task_estimate_minimal_start_time(SD_task_t task){
  unsigned int i;
  double min_start_time=0.0;
  xbt_dynar_t parents, grand_parents;
  SD_task_t parent, grand_parent;

  parents = SD_task_get_parents(task);
  xbt_dynar_foreach(parents, i, parent){
    if (SD_task_get_kind(parent) == SD_TASK_COMM_PAR_MXN_1D_BLOCK) {
      grand_parents = SD_task_get_parents(parent);
      xbt_dynar_get_cpy(grand_parents, 0, &grand_parent);
        if (SD_task_get_estimated_finish_time(grand_parent) > min_start_time)
          min_start_time = SD_task_get_estimated_finish_time(grand_parent);
      xbt_dynar_free_container(&grand_parents);
    }
    else{
      if (SD_task_get_estimated_finish_time(parent) > min_start_time)
        min_start_time = SD_task_get_estimated_finish_time(parent);
    }
  }
  xbt_dynar_free_container(&parents);
  return min_start_time;
}
Example #12
/********************************* File **************************************/
void __MSG_file_get_info(msg_file_t fd){

  msg_file_priv_t priv = MSG_file_priv(fd);
  xbt_dynar_t info = simcall_file_get_info(priv->simdata->smx_file);
  sg_size_t *psize;

  priv->content_type = xbt_dynar_pop_as(info, char *);
  priv->storage_type = xbt_dynar_pop_as(info, char *);
  priv->storageId = xbt_dynar_pop_as(info, char *);
  priv->mount_point = xbt_dynar_pop_as(info, char *);
  psize = xbt_dynar_pop_as(info, sg_size_t*);
  priv->size = *psize;
  xbt_free(psize);
  xbt_dynar_free_container(&info);
}
Example #13
/* Determine whether a task is ready. The condition to meet is that all its compute predecessors are at least in the
 * SD_SCHEDULED state, i.e., in one of the following states:
 *  - SD_SCHEDULED
 *  - SD_RUNNABLE
 *  - SD_RUNNING
 *  - SD_DONE
 */
int SD_task_is_ready(SD_task_t task){
  unsigned int i;
  int is_ready = 1;
  xbt_dynar_t parents, grand_parents;
  SD_task_t parent, grand_parent;

  parents = SD_task_get_parents(task);

  if (xbt_dynar_length(parents)) {
    xbt_dynar_foreach(parents, i, parent){
      if (SD_task_get_kind(parent) == SD_TASK_COMM_E2E) {
        /* Data dependency case: a compute task is preceded by a data transfer. Its parent (in a scheduling sense) is
         * then the grand parent
         */
        grand_parents = SD_task_get_parents(parent);
        xbt_dynar_get_cpy(grand_parents, 0, &grand_parent);
        if (SD_task_get_state(grand_parent) < SD_SCHEDULED) {
          is_ready =0;
          xbt_dynar_free_container(&grand_parents); /* avoid memory leaks */
          break;
        } else {
          xbt_dynar_free_container(&grand_parents); /* avoid memory leaks */
        }
      } else {
        /* Control dependency case: a compute task predecessor is another compute task. */
        if (SD_task_get_state(parent) < SD_SCHEDULED) {
         is_ready =0;
         break;
        }
      }
    }
  }
  xbt_dynar_free_container(&parents); /* avoid memory leaks */

  return is_ready;
}
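A short usage sketch (assumed surrounding code, not part of the example): filter a set of compute tasks down to those that can be handed to the scheduler right away.

/* Illustration only: collect the tasks of 'candidates' that are ready.
 * The returned dynar only borrows the tasks, so callers should release it
 * with xbt_dynar_free_container(). */
static xbt_dynar_t filter_ready(xbt_dynar_t candidates)
{
  unsigned int i;
  SD_task_t task;
  xbt_dynar_t ready = xbt_dynar_new(sizeof(SD_task_t), NULL);

  xbt_dynar_foreach(candidates, i, task)
    if (SD_task_is_ready(task))
      xbt_dynar_push(ready, &task);

  return ready;
}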
Example #14
int main(int argc, char **argv)
{
  unsigned int ctr;
  const SD_workstation_t *workstations;
  SD_task_t t1, c1, t2, c2, t3, c3, t4, task;
  xbt_dynar_t changed_tasks;

  SD_init(&argc, argv);
  SD_create_environment(argv[1]);
  workstations = SD_workstation_get_list();

  t1 = SD_task_create_comp_seq("t1", NULL, 25000000);
  c1 = SD_task_create_comm_e2e("c1", NULL, 125000000);
  t2 = SD_task_create_comp_seq("t2", NULL, 25000000);
  c2 = SD_task_create_comm_e2e("c2", NULL, 62500000);
  t3 = SD_task_create_comp_seq("t3", NULL, 25000000);
  c3 = SD_task_create_comm_e2e("c3", NULL, 31250000);
  t4 = SD_task_create_comp_seq("t4", NULL, 25000000);

  /* Add dependencies: t1->c1->t2->c2->t3->c3->t4 */
  SD_task_dependency_add(NULL, NULL, t1, c1);
  SD_task_dependency_add(NULL, NULL, c1, t2);
  SD_task_dependency_add(NULL, NULL, t2, c2);
  SD_task_dependency_add(NULL, NULL, c2, t3);
  SD_task_dependency_add(NULL, NULL, t3, c3);
  SD_task_dependency_add(NULL, NULL, c3, t4);

  /* Schedule t1 and t3 on the first host, t2 and t4 on the second host */
  /* Transfers are auto-scheduled */
  SD_task_schedulel(t1, 1, workstations[0]);
  SD_task_schedulel(t2, 1, workstations[1]);
  SD_task_schedulel(t3, 1, workstations[0]);
  SD_task_schedulel(t4, 1, workstations[1]);

  /* Add some watchpoint upon task completion */
  SD_task_watch(t1, SD_DONE);
  SD_task_watch(c1, SD_DONE);
  SD_task_watch(t2, SD_DONE);
  SD_task_watch(c2, SD_DONE);
  SD_task_watch(t3, SD_DONE);
  SD_task_watch(c3, SD_DONE);
  SD_task_watch(t4, SD_DONE);

  while (!xbt_dynar_is_empty((changed_tasks = SD_simulate(-1.0)))) {    
    XBT_INFO("link1: bw=%.0f, lat=%f",
             SD_route_get_current_bandwidth(workstations[0], workstations[1]),
             SD_route_get_current_latency(workstations[0], workstations[1]));
    XBT_INFO("Jupiter: power=%.0f",
             SD_workstation_get_power(workstations[0])*
             SD_workstation_get_available_power(workstations[0]));
    XBT_INFO("Tremblay: power=%.0f",
             SD_workstation_get_power(workstations[1])*
             SD_workstation_get_available_power(workstations[1]));
    xbt_dynar_foreach(changed_tasks, ctr, task) {
      XBT_INFO("Task '%s' start time: %f, finish time: %f",
           SD_task_get_name(task),
           SD_task_get_start_time(task), SD_task_get_finish_time(task));
      if (SD_task_get_state(task)==SD_DONE)
        SD_task_destroy(task);
    }
    xbt_dynar_free_container(&changed_tasks);
  }

  /* The example is truncated here; a minimal ending (sketch) would be: */
  SD_exit();
  return 0;
}
Example #15
int main(int argc, char **argv)
{
  int i, j;
  xbt_dynar_t changed_tasks;
  int n_hosts;
  const SD_workstation_t *hosts;
  SD_task_t taskInit;
  SD_task_t PtoPComm1;
  SD_task_t PtoPComm2;
  SD_task_t ParComp_wocomm;
  SD_task_t IntraRedist;
  SD_task_t ParComp_wcomm1;
  SD_task_t InterRedist;
  SD_task_t taskFinal;
  SD_task_t ParComp_wcomm2;
  SD_workstation_t PtoPcomm1_hosts[2];
  SD_workstation_t PtoPcomm2_hosts[2];
  double PtoPcomm1_table[] = { 0, 12500000, 0, 0 };     /* 100Mb */
  double PtoPcomm2_table[] = { 0, 1250000, 0, 0 };      /* 10Mb */
  double ParComp_wocomm_cost[] = { 1e+9, 1e+9, 1e+9, 1e+9, 1e+9 };      /* 1 Gflop per Proc */
  double *ParComp_wocomm_table;
  SD_workstation_t ParComp_wocomm_hosts[5];
  double *IntraRedist_cost;
  double *IntraRedist_table;
  SD_workstation_t IntraRedist_hosts[5];
  double ParComp_wcomm1_cost[] = { 1e+9, 1e+9, 1e+9, 1e+9, 1e+9 };      /* 1 Gflop per Proc */
  double *ParComp_wcomm1_table;
  SD_workstation_t ParComp_wcomm1_hosts[5];
  double *InterRedist_cost;
  double *InterRedist_table;
  double ParComp_wcomm2_cost[] = { 1e+8, 1e+8, 1e+8, 1e+8, 1e+8 };      /* 0.1 Gflop per Proc (0.02 sec duration) */
  SD_workstation_t ParComp_wcomm2_hosts[5];
  double final_cost = 5e+9;
  double *ParComp_wcomm2_table;

  /* initialisation of SD */
  SD_init(&argc, argv);

  /* creation of the environment */
  if (strstr(argv[1],".xml"))
    SD_create_environment(argv[1]);
  else
    xbt_die("Unsupported platform description style (not XML): %s",
            argv[1]);

  /* getting platform infos */
  n_hosts = SD_workstation_get_number();
  hosts = SD_workstation_get_list();

  /* sorting hosts by hostname */
  qsort((void *) hosts, n_hosts, sizeof(SD_workstation_t),
        nameCompareHosts);

  /* creation of the tasks */
  taskInit = SD_task_create("Initial", NULL, 1.0);
  PtoPComm1 = SD_task_create("PtoP Comm 1", NULL, 1.0);
  PtoPComm2 = SD_task_create("PtoP Comm 2", NULL, 1.0);
  ParComp_wocomm = SD_task_create("Par Comp without comm", NULL, 1.0);
  IntraRedist = SD_task_create("intra redist", NULL, 1.0);
  ParComp_wcomm1 = SD_task_create("Par Comp with comm 1", NULL, 1.0);
  InterRedist = SD_task_create("inter redist", NULL, 1.0);
  taskFinal = SD_task_create("Final", NULL, 1.0);
  ParComp_wcomm2 = SD_task_create("Par Comp with comm 2", NULL, 1.0);


  /* creation of the dependencies */
  SD_task_dependency_add(NULL, NULL, taskInit, PtoPComm1);
  SD_task_dependency_add(NULL, NULL, taskInit, PtoPComm2);
  SD_task_dependency_add(NULL, NULL, PtoPComm1, ParComp_wocomm);
  SD_task_dependency_add(NULL, NULL, ParComp_wocomm, IntraRedist);
  SD_task_dependency_add(NULL, NULL, IntraRedist, ParComp_wcomm1);
  SD_task_dependency_add(NULL, NULL, ParComp_wcomm1, InterRedist);
  SD_task_dependency_add(NULL, NULL, InterRedist, ParComp_wcomm2);
  SD_task_dependency_add(NULL, NULL, ParComp_wcomm2, taskFinal);
  SD_task_dependency_add(NULL, NULL, PtoPComm2, taskFinal);


  /* scheduling parameters */

  /* large point-to-point communication (0.1 sec duration) */
  PtoPcomm1_hosts[0] = hosts[0];
  PtoPcomm1_hosts[1] = hosts[1];

  /* small point-to-point communication (0.01 sec duration) */
  PtoPcomm2_hosts[0] = hosts[0];
  PtoPcomm2_hosts[1] = hosts[2];

  /* parallel task without intra communications (1 sec duration) */
  ParComp_wocomm_table = xbt_new0(double, 25);

  for (i = 0; i < 5; i++) {
    ParComp_wocomm_hosts[i] = hosts[i];
  }

  /* redistribution within a cluster (small latencies) */
  /* each host sends (4*2.5Mb =) 10Mb */
  /* bandwidth is shared between 5 flows (0.05sec duration) */
  IntraRedist_cost = xbt_new0(double, 5);
  IntraRedist_table = xbt_new0(double, 25);
  for (i = 0; i < 5; i++) {
    for (j = 0; j < 5; j++) {
      if (i == j)
        IntraRedist_table[i * 5 + j] = 0.;
      else
        IntraRedist_table[i * 5 + j] = 312500.; /* 2.5Mb */
    }
  }

  for (i = 0; i < 5; i++) {
    IntraRedist_hosts[i] = hosts[i];
  }

  /* parallel task with intra communications */
  /* Computation domination (1 sec duration) */
  ParComp_wcomm1_table = xbt_new0(double, 25);

  for (i = 0; i < 5; i++) {
    ParComp_wcomm1_hosts[i] = hosts[i];
  }

  for (i = 0; i < 5; i++) {
    for (j = 0; j < 5; j++) {
      if (i == j)
        ParComp_wcomm1_table[i * 5 + j] = 0.;
      else
        ParComp_wcomm1_table[i * 5 + j] = 312500.;      /* 2.5Mb */
    }
  }

  /* inter cluster redistribution (big latency on the backbone) */
  /* (0.5sec duration without latency impact) */
  InterRedist_cost = xbt_new0(double, 10);
  InterRedist_table = xbt_new0(double, 100);
  for (i = 0; i < 5; i++) {
    InterRedist_table[i * 10 + i + 5] = 1250000.;       /* 10Mb */
  }

  /* parallel task with intra communications */
  /* Communication domination (0.1 sec duration) */

  ParComp_wcomm2_table = xbt_new0(double, 25);

  for (i = 0; i < 5; i++) {
    ParComp_wcomm2_hosts[i] = hosts[i + 5];
  }

  for (i = 0; i < 5; i++) {
    for (j = 0; j < 5; j++) {
      if (i == j)
        ParComp_wcomm2_table[i * 5 + j] = 0.;
      else
        ParComp_wcomm2_table[i * 5 + j] = 625000.;      /* 5Mb */
    }
  }

  /* Sequential task */


  /* scheduling the tasks */
  SD_task_schedule(taskInit, 1, hosts, SD_SCHED_NO_COST, SD_SCHED_NO_COST,
                   -1.0);
  SD_task_schedule(PtoPComm1, 2, PtoPcomm1_hosts, SD_SCHED_NO_COST,
                   PtoPcomm1_table, -1.0);
  SD_task_schedule(PtoPComm2, 2, PtoPcomm2_hosts, SD_SCHED_NO_COST,
                   PtoPcomm2_table, -1.0);
  SD_task_schedule(ParComp_wocomm, 5, ParComp_wocomm_hosts,
                   ParComp_wocomm_cost, ParComp_wocomm_table, -1.0);
  SD_task_schedule(IntraRedist, 5, IntraRedist_hosts, IntraRedist_cost,
                   IntraRedist_table, -1.0);
  SD_task_schedule(ParComp_wcomm1, 5, ParComp_wcomm1_hosts,
                   ParComp_wcomm1_cost, ParComp_wcomm1_table, -1.0);
  SD_task_schedule(InterRedist, 10, hosts, InterRedist_cost,
                   InterRedist_table, -1.0);
  SD_task_schedule(ParComp_wcomm2, 5, ParComp_wcomm2_hosts,
                   ParComp_wcomm2_cost, ParComp_wcomm2_table, -1.0);
  SD_task_schedule(taskFinal, 1, &(hosts[9]), &final_cost,
                   SD_SCHED_NO_COST, -1.0);

  /* let's launch the simulation! */
  changed_tasks = SD_simulate(-1.0);

  XBT_INFO("Simulation time: %f", SD_get_clock());

  xbt_dynar_free_container(&changed_tasks);

  free(ParComp_wocomm_table);
  free(IntraRedist_cost);
  free(IntraRedist_table);
  free(ParComp_wcomm1_table);
  free(InterRedist_cost);
  free(InterRedist_table);
  free(ParComp_wcomm2_table);

  SD_task_destroy(taskInit);
  SD_task_destroy(PtoPComm1);
  SD_task_destroy(PtoPComm2);
  SD_task_destroy(ParComp_wocomm);
  SD_task_destroy(IntraRedist);
  SD_task_destroy(ParComp_wcomm1);
  SD_task_destroy(InterRedist);
  SD_task_destroy(ParComp_wcomm2);
  SD_task_destroy(taskFinal);

  SD_exit();
  return 0;
}
Example #16
int main(int argc, char **argv)
{
  int i;
  const char *platform_file;
  const SD_workstation_t *workstations;
  int kind;
  SD_task_t task, taskA, taskB, taskC;
  xbt_dynar_t changed_tasks;
  SD_workstation_t workstation_list[2];
  double computation_amount[2];
  double communication_amount[4] = { 0 };
  double rate = -1.0;
  SD_workstation_t w1, w2;

  /* SD initialization */
  SD_init(&argc, argv);

  /*  xbt_log_control_set("sd.thres=debug"); */

  if (argc < 2) {
    XBT_INFO("Usage: %s platform_file", argv[0]);
    XBT_INFO("example: %s sd_platform.xml", argv[0]);
    exit(1);
  }

  /* creation of the environment */
  platform_file = argv[1];
  SD_create_environment(platform_file);

  /* Change the access mode of the workstations */
  workstations = SD_workstation_get_list();
  w1 = workstations[0];
  w2 = workstations[1];
  for (i = 0; i < 2; i++) {
    SD_workstation_set_access_mode(workstations[i],
                                   SD_WORKSTATION_SEQUENTIAL_ACCESS);
    XBT_INFO("Access mode of %s is %s",
          SD_workstation_get_name(workstations[i]),
          (SD_workstation_get_access_mode(workstations[i]) ==
           SD_WORKSTATION_SEQUENTIAL_ACCESS) ? "sequential" : "shared");
  }

  /* creation of the tasks and their dependencies */
  taskA = SD_task_create_comp_seq("Task A", NULL, 2e9);
  taskB = SD_task_create_comm_e2e("Task B", NULL, 2e9);
  taskC = SD_task_create_comp_seq("Task C", NULL, 1e9);
  TRACE_category ("taskA");
  TRACE_category ("taskB");
  TRACE_category ("taskC");
  TRACE_sd_set_task_category (taskA, "taskA");
  TRACE_sd_set_task_category (taskB, "taskB");
  TRACE_sd_set_task_category (taskC, "taskC");

  /* if everything is ok, no exception is forwarded or rethrown by main() */

  /* watch points */
  SD_task_watch(taskA, SD_RUNNING);
  SD_task_watch(taskB, SD_RUNNING);
  SD_task_watch(taskC, SD_RUNNING);
  SD_task_watch(taskC, SD_DONE);


  /* scheduling parameters */
  workstation_list[0] = w1;
  workstation_list[1] = w2;
  computation_amount[0] = SD_task_get_amount(taskA);
  computation_amount[1] = SD_task_get_amount(taskB);

  communication_amount[1] = SD_task_get_amount(taskC);
  communication_amount[2] = 0.0;

  SD_task_schedule(taskA, 1, &w1,
                   &(computation_amount[0]), SD_SCHED_NO_COST, rate);
  SD_task_schedule(taskB, 2, workstation_list,
                   SD_SCHED_NO_COST, communication_amount, rate);
  SD_task_schedule(taskC, 1, &w1,
                   &(computation_amount[1]), SD_SCHED_NO_COST, rate);

  /* let's launch the simulation! */
  while (!xbt_dynar_is_empty(changed_tasks = SD_simulate(-1.0))) {
    for (i = 0; i < 2; i++) {
      task = SD_workstation_get_current_task(workstations[i]);
      if (task)
        kind = SD_task_get_kind(task);
      else {
        XBT_INFO("There is no task running on %s",
              SD_workstation_get_name(workstations[i]));
        continue;
      }

      switch (kind) {
      case SD_TASK_COMP_SEQ:
        XBT_INFO("%s is currently running on %s (SD_TASK_COMP_SEQ)",
              SD_task_get_name(task),
              SD_workstation_get_name(workstations[i]));
        break;
      case SD_TASK_COMM_E2E:
        XBT_INFO("%s is currently running on %s (SD_TASK_COMM_E2E)",
              SD_task_get_name(task),
              SD_workstation_get_name(workstations[i]));
        break;
      case SD_TASK_NOT_TYPED:
        XBT_INFO("Task running on %s has no type",
              SD_workstation_get_name(workstations[i]));
        break;
      default:
        XBT_ERROR("Shouldn't be here");
      }
    }
    xbt_dynar_free_container(&changed_tasks);
  }
  xbt_dynar_free_container(&changed_tasks);

  XBT_DEBUG("Destroying tasks...");

  SD_task_destroy(taskA);
  SD_task_destroy(taskB);
  SD_task_destroy(taskC);

  XBT_DEBUG("Tasks destroyed. Exiting SimDag...");

  SD_exit();
  return 0;
}
Example #17
int main(int argc, char **argv)
{
  unsigned int ctr;
  const char *platform_file;
  const SD_workstation_t *workstations;
  SD_task_t task, taskA, taskB, taskC, taskD, taskE;
  xbt_dynar_t changed_tasks;

  /* initialization of SD */
  SD_init(&argc, argv);

  /*  xbt_log_control_set("sd.thres=debug"); */

  if (argc < 2) {
    XBT_INFO("Usage: %s platform_file", argv[0]);
    XBT_INFO("example: %s sd_platform.xml", argv[0]);
    exit(1);
  }

  /* creation of the environment */
  platform_file = argv[1];
  SD_create_environment(platform_file);
 
  workstations = SD_workstation_get_list();

  /* creation of some typed tasks and their dependencies */
  /* chain of five tasks, three compute tasks with two data transfers */
  /* in between */
  taskA = SD_task_create_comp_seq("Task A", NULL, 5e9);
  taskB = SD_task_create_comm_e2e("Task B", NULL, 1e7);
  taskC = SD_task_create_comp_seq("Task C", NULL, 5e9);
  taskD = SD_task_create_comm_e2e("Task D", NULL, 1e7);
  taskE = SD_task_create_comp_seq("Task E", NULL, 5e9);

  SD_task_dependency_add(NULL, NULL, taskA, taskB);
  SD_task_dependency_add(NULL, NULL, taskB, taskC);
  SD_task_dependency_add(NULL, NULL, taskC, taskD);
  SD_task_dependency_add(NULL, NULL, taskD, taskE);

  /* Add watchpoints on completion of compute tasks */
  SD_task_watch(taskA, SD_DONE);
  SD_task_watch(taskC, SD_DONE);
  SD_task_watch(taskE, SD_DONE);

  /* Auto-schedule the compute tasks on three different workstations */
  /* Data transfer tasks taskB and taskD are automagically scheduled */
  SD_task_schedulel(taskA, 1, workstations[0]);
  SD_task_schedulel(taskC, 1, workstations[1]);
  SD_task_schedulel(taskE, 1, workstations[0]);
  while (!xbt_dynar_is_empty((changed_tasks = SD_simulate(-1.0)))) {
    XBT_INFO("Simulation stopped after %.4f seconds", SD_get_clock());
    xbt_dynar_foreach(changed_tasks, ctr, task) {
      XBT_INFO("Task '%s' start time: %f, finish time: %f",
         SD_task_get_name(task),
         SD_task_get_start_time(task), 
         SD_task_get_finish_time(task));
 
    }
    /* Let's throttle the communication for taskD once its parent is SD_DONE. */
    /* The bandwidth is 1.25e8 and the data size is 1e7; to throttle the
     * bandwidth by a factor of 2, the rate is 1.25e8/(2*1e7) = 6.25.
     * Changing the rate is only possible before the task execution starts
     * (i.e., before it enters the SD_RUNNING state).
     */
    if (SD_task_get_state(taskC) == SD_DONE && SD_task_get_state(taskD) < SD_RUNNING)
      SD_task_set_rate(taskD, 6.25);
    xbt_dynar_free_container(&changed_tasks);
  }