Example #1
static int receiver(int argc, char *argv[])
{
  xbt_assert(argc==3, "This function expects 2 parameters from the XML deployment file");
  int id = xbt_str_parse_int(argv[1], "ID should be numerical, not %s");
  int task_amount = xbt_str_parse_int(argv[2], "Invalid amount of tasks: %s");
  msg_task_t *tasks = xbt_new(msg_task_t, task_amount);
  xbt_dynar_t comms = xbt_dynar_new(sizeof(msg_comm_t), NULL);

  char mailbox[80];
  snprintf(mailbox,79, "receiver-%d", id);
   
  MSG_process_sleep(10);
  for (int i = 0; i < task_amount; i++) {
    XBT_INFO("Wait to receive task %d", i);
    tasks[i] = NULL;
    msg_comm_t comm = MSG_task_irecv(&tasks[i], mailbox);
    xbt_dynar_push_as(comms, msg_comm_t, comm);
  }

  /* Wait for the completion of all communications */
  while (!xbt_dynar_is_empty(comms)) {
    msg_comm_t comm;
    // MSG_comm_waitany returns the rank of the comm that just ended. Remove it.
    xbt_dynar_remove_at(comms, MSG_comm_waitany(comms), &comm);
    msg_task_t task = MSG_comm_get_task(comm);
    MSG_comm_destroy(comm);
    XBT_INFO("Processing \"%s\"", MSG_task_get_name(task));
    MSG_task_execute(task);
    XBT_INFO("\"%s\" done", MSG_task_get_name(task));
    msg_error_t err = MSG_task_destroy(task);
    xbt_assert(err == MSG_OK, "MSG_task_destroy failed");
  }
  xbt_dynar_free(&comms);
  xbt_free(tasks);

  /* Tell the sender that all tasks are done */
  MSG_task_send(MSG_task_create(NULL, 0, 0, NULL), "finalize");
  XBT_INFO("I'm done. See you!");
  return 0;
}
Example #2
/** Emitter function */
int master(int argc, char *argv[])
{
  int number_of_tasks = atoi(argv[1]);
  double task_comp_size = atof(argv[2]);
  double task_comm_size = atof(argv[3]);
  int slaves_count = atoi(argv[4]);

  int i;
  msg_mailbox_t *mb;   /* one private mailbox per slave */

  INFO2("Got %d slaves and %d tasks to process", slaves_count, number_of_tasks);
  mb = xbt_new(msg_mailbox_t, slaves_count);
  for (i = 0; i < slaves_count; i++) {
     mb[i] = MSG_mailbox_create(NULL);
  }

  for (i = 0; i < number_of_tasks; i++) {
    char sprintf_buffer[64];
    m_task_t task=NULL;

    sprintf(sprintf_buffer, "Task_%d", i);
    task = MSG_task_create(sprintf_buffer, task_comp_size, task_comm_size, NULL);
    INFO2("Sending \"%s\" to mailbox %d", task->name, i % slaves_count);
    MSG_mailbox_put_with_timeout(mb[i%slaves_count], task, -1);
    INFO0("Sent");
  }
  
/*   INFO0("All tasks have been dispatched. Let's tell everybody the computation is over."); */
/*   for (i = 0; i < slaves_count; i++) { */
/*     char mailbox[80]; */
    
/*     sprintf(mailbox,"slave-%d",i % slaves_count); */
/*     MSG_task_send(MSG_task_create("finalize", 0, 0, 0), */
/* 		  mailbox); */
/*   } */
  
  INFO0("Goodbye now!");
  return 0;
} /* end_of_master */
Example #3
/** Sender function  */
int sender(int argc, char *argv[])
{
  long number_of_tasks = atol(argv[1]);
  double task_comp_size = atof(argv[2]);
  double task_comm_size = atof(argv[3]);
  long receivers_count = atol(argv[4]);

  msg_comm_t *comm = xbt_new(msg_comm_t, number_of_tasks + receivers_count);
  int i;
  msg_task_t task = NULL;
  for (i = 0; i < number_of_tasks; i++) {
    char mailbox[256];
    char sprintf_buffer[256];
    sprintf(mailbox, "receiver-%ld", i % receivers_count);
    sprintf(sprintf_buffer, "Task_%d", i);
    task =
        MSG_task_create(sprintf_buffer, task_comp_size, task_comm_size,
                        NULL);
    comm[i] = MSG_task_isend(task, mailbox);
    XBT_INFO("Send to receiver-%ld Task_%d", i % receivers_count, i);
  }
  for (i = 0; i < receivers_count; i++) {
    char mailbox[80];
    sprintf(mailbox, "receiver-%ld", i % receivers_count);
    task = MSG_task_create("finalize", 0, 0, 0);
    comm[i + number_of_tasks] = MSG_task_isend(task, mailbox);
    XBT_INFO("Send to receiver-%ld finalize", i % receivers_count);

  }
  /* Here we are waiting for the completion of all communications */
  MSG_comm_waitall(comm, (number_of_tasks + receivers_count), -1);
  for (i = 0; i < number_of_tasks + receivers_count; i++)
    MSG_comm_destroy(comm[i]);

  XBT_INFO("Goodbye now!");
  xbt_free(comm);
  return 0;
}                               /* end_of_sender */
Example #4
static void send_one(int from, int to) {
  //XBT_DEBUG("send_one(%d, %d)",from,to);

  if (count %100000 == 0)
    XBT_INFO("Sending task #%d",count);
  count++;

  bcast_task_t bt;
  if (!xbt_dynar_is_empty(reclaimed)) {
     bt = xbt_dynar_pop_as(reclaimed,bcast_task_t);
  } else {
    bt = xbt_new(s_bcast_task_t,1);
  }
  bt->i=from;
  bt->j=(from+to)/2;
  bt->k=to;

  SD_task_t task = SD_task_create_comm_e2e(NULL,bt,424242);

  XBT_DEBUG("Schedule task between %d and %d",bt->i,bt->j);
  SD_task_schedulel(task,2,ws_list[bt->i],ws_list[bt->j]);
  SD_task_watch(task,SD_DONE);
}
Example #5
int smpi_sample_1(int global, const char *file, int line, int iters, double threshold)
{
  char *loc = sample_location(global, file, line);
  local_data_t *data;

  smpi_bench_end();     /* Take time from previous MPI call into account */
  if (!samples) {
    samples = xbt_dict_new();
  }
  data = xbt_dict_get_or_null(samples, loc);
  if (!data) {
    data = (local_data_t *) xbt_new(local_data_t, 1);
    data->count = 0;
    data->sum = 0.0;
    data->sum_pow2 = 0.0;
    data->iters = iters;
    data->threshold = threshold;
    data->started = 0;
    xbt_dict_set(samples, loc, data, &free);
    free(loc);  /* xbt_dict_set() copies the key, so loc can be released */
    return 0;
  }
  free(loc);
  return 1;
}
Example #6
xbt_dict_t Storage::parseContent(char *filename)
{
  m_usedSize = 0;
  if ((!filename) || (strcmp(filename, "") == 0))
    return NULL;

  xbt_dict_t parse_content = xbt_dict_new_homogeneous(xbt_free_f);
  FILE *file = NULL;

  file = surf_fopen(filename, "r");
  if (file == NULL)
    xbt_die("Cannot open file '%s' (path=%s)", filename,
            xbt_str_join(surf_path, ":"));

  char *line = NULL;
  size_t len = 0;
  ssize_t read;
  char path[1024];
  sg_size_t size;

  while ((read = xbt_getline(&line, &len, file)) != -1) {
    if (read){
      if (sscanf(line, "%1023s %llu", path, &size) == 2) {
        m_usedSize += size;
        sg_size_t *psize = xbt_new(sg_size_t, 1);
        *psize = size;
        xbt_dict_set(parse_content,path,psize,NULL);
      } else {
        xbt_die("Malformed line in content file (expected '<path> <size>'): %s", line);
      }
    }
  }
  free(line);
  fclose(file);
  return parse_content;
}
Example #7
void smpi_sample_1(int global, const char *file, int line, int iters, double threshold)
{
  char *loc = sample_location(global, file, line);
  local_data_t *data;

  smpi_bench_end();     /* Take time from previous, unrelated computation into account */
  if (!samples)
    samples = xbt_dict_new_homogeneous(free);

  data = xbt_dict_get_or_null(samples, loc);
  if (!data) {
    xbt_assert(threshold>0 || iters>0,
        "You should provide either a positive amount of iterations to bench, or a positive maximal stderr (or both)");
    data = (local_data_t *) xbt_new(local_data_t, 1);
    data->count = 0;
    data->sum = 0.0;
    data->sum_pow2 = 0.0;
    data->iters = iters;
    data->threshold = threshold;
    data->benching = 1; // If we have no data, we need at least one
    data->mean = 0;
    xbt_dict_set(samples, loc, data, NULL);
    XBT_DEBUG("XXXXX First time ever on benched nest %s.",loc);
  } else {
    if (data->iters != iters || data->threshold != threshold) {
      XBT_ERROR("Asked to bench block %s with different settings %d, %f is not %d, %f. How did you manage to give two numbers at the same line??",
          loc, data->iters, data->threshold, iters,threshold);
      THROW_IMPOSSIBLE;
    }

    // if we already have some data, check whether sample_2 should get one more bench or whether it should emulate the computation instead
    data->benching = !sample_enough_benchs(data);
    XBT_DEBUG("XXXX Re-entering the benched nest %s. %s",loc, (data->benching?"more benching needed":"we have enough data, skip computes"));
  }
  free(loc);
}
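
For context, here is a hedged sketch of how this sampling machinery is usually driven from user code, assuming the SMPI_SAMPLE_LOCAL convenience macro from smpi/smpi.h (which wraps smpi_sample_1/2/3 in a for loop); the kernel and its parameters are made up for illustration:

#include <smpi/smpi.h>

/* Hypothetical kernel: bench its body at most 10 times, or until the
 * relative standard error drops below 0.01, then let SMPI skip/emulate
 * the remaining executions based on the recorded mean. */
static void compute_kernel(double *a, int n)
{
  SMPI_SAMPLE_LOCAL(10, 0.01) {
    for (int i = 0; i < n; i++)
      a[i] = a[i] * a[i];
  }
}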
Example #8
int smpi_coll_tuned_bcast_ompi_pipeline( void* buffer,
                                      int original_count, 
                                      MPI_Datatype datatype, 
                                      int root,
                                      MPI_Comm comm)
{
    int count_by_segment = original_count;
    size_t type_size;
    int segsize = 1024 << 7;
    //mca_coll_tuned_module_t *tuned_module = (mca_coll_tuned_module_t*) module;
    //mca_coll_tuned_comm_t *data = tuned_module->tuned_data;
    
//    return ompi_coll_tuned_bcast_intra_generic( buffer, count, datatype, root, comm, module,
//                                                count_by_segment, data->cached_pipeline );
    ompi_coll_tree_t * tree = ompi_coll_tuned_topo_build_chain( 1, comm, root );
    int i;
    int rank, size;
    int segindex;
    int num_segments; /* Number of segments */
    int sendcount;    /* number of elements sent in this segment */ 
    size_t realsegsize;
    char *tmpbuf;
    ptrdiff_t extent;
    MPI_Request recv_reqs[2] = {MPI_REQUEST_NULL, MPI_REQUEST_NULL};
    MPI_Request *send_reqs = NULL;
    int req_index;
    
    /**
     * Determine number of elements sent per operation.
     */
    type_size = smpi_datatype_size(datatype);

    size = smpi_comm_size(comm);
    rank = smpi_comm_rank(comm);
    xbt_assert( size > 1 );


    const double a_p16  = 3.2118e-6; /* [1 / byte] */
    const double b_p16  = 8.7936;   
    const double a_p64  = 2.3679e-6; /* [1 / byte] */
    const double b_p64  = 1.1787;     
    const double a_p128 = 1.6134e-6; /* [1 / byte] */
    const double b_p128 = 2.1102;
    size_t message_size;

    /* the total message size drives the segment-size decision below */
    message_size = type_size * (unsigned long)original_count;

    if (size < (a_p128 * message_size + b_p128)) {
        /* Pipeline with 128KB segments */
        segsize = 1024 << 7;
    } else if (size < (a_p64 * message_size + b_p64)) {
        /* Pipeline with 64KB segments */
        segsize = 1024 << 6;
    } else if (size < (a_p16 * message_size + b_p16)) {
        /* Pipeline with 16KB segments */
        segsize = 1024 << 4;
    }

    COLL_TUNED_COMPUTED_SEGCOUNT( segsize, type_size, count_by_segment );

    XBT_DEBUG("coll:tuned:bcast_intra_pipeline rank %d ss %5d type_size %lu count_by_segment %d",
                 smpi_comm_rank(comm), segsize, (unsigned long)type_size, count_by_segment);



    extent = smpi_datatype_get_extent (datatype);
    num_segments = (original_count + count_by_segment - 1) / count_by_segment;
    realsegsize = count_by_segment * extent;
    
    /* Set the buffer pointers */
    tmpbuf = (char *) buffer;

    if( tree->tree_nextsize != 0 ) {
        send_reqs = xbt_new(MPI_Request, tree->tree_nextsize  );
    }

    /* Root code */
    if( rank == root ) {
        /* 
           For each segment:
           - send segment to all children.
             The last segment may have less elements than other segments.
        */
        sendcount = count_by_segment;
        for( segindex = 0; segindex < num_segments; segindex++ ) {
            if( segindex == (num_segments - 1) ) {
                sendcount = original_count - segindex * count_by_segment;
            }
            for( i = 0; i < tree->tree_nextsize; i++ ) { 
                send_reqs[i] = smpi_mpi_isend(tmpbuf, sendcount, datatype,
                                         tree->tree_next[i], 
                                         COLL_TAG_BCAST, comm);
           } 

            /* complete the sends before starting the next sends */
            smpi_mpi_waitall( tree->tree_nextsize, send_reqs, 
                                         MPI_STATUSES_IGNORE );

            /* update tmp buffer */
            tmpbuf += realsegsize;

        }
    } 
    
    /* Intermediate nodes code */
    else if( tree->tree_nextsize > 0 ) { 
        /* 
           Create the pipeline. 
           1) Post the first receive
           2) For segments 1 .. num_segments
              - post new receive
              - wait on the previous receive to complete
              - send this data to children
           3) Wait on the last segment
           4) Compute number of elements in last segment.
           5) Send the last segment to children
         */
        req_index = 0;
        recv_reqs[req_index]=smpi_mpi_irecv(tmpbuf, count_by_segment, datatype,
                           tree->tree_prev, COLL_TAG_BCAST,
                           comm);
        
        for( segindex = 1; segindex < num_segments; segindex++ ) {
            
            req_index = req_index ^ 0x1;
            
            /* post new irecv */
            recv_reqs[req_index]= smpi_mpi_irecv( tmpbuf + realsegsize, count_by_segment,
                                datatype, tree->tree_prev, 
                                COLL_TAG_BCAST,
                                comm);
            
            /* wait for and forward the previous segment to children */
            smpi_mpi_wait( &recv_reqs[req_index ^ 0x1], 
                                     MPI_STATUS_IGNORE );
            
            for( i = 0; i < tree->tree_nextsize; i++ ) { 
                send_reqs[i]=smpi_mpi_isend(tmpbuf, count_by_segment, datatype,
                                         tree->tree_next[i], 
                                         COLL_TAG_BCAST, comm );
            } 
            
            /* complete the sends before starting the next iteration */
            smpi_mpi_waitall( tree->tree_nextsize, send_reqs, 
                                         MPI_STATUSES_IGNORE );
            
            /* Update the receive buffer */
            tmpbuf += realsegsize;
        }

        /* Process the last segment */
        smpi_mpi_wait( &recv_reqs[req_index], MPI_STATUS_IGNORE );
        sendcount = original_count - (num_segments - 1) * count_by_segment;
        for( i = 0; i < tree->tree_nextsize; i++ ) {
            send_reqs[i] = smpi_mpi_isend(tmpbuf, sendcount, datatype,
                                     tree->tree_next[i], 
                                     COLL_TAG_BCAST, comm);
        }
        
        smpi_mpi_waitall( tree->tree_nextsize, send_reqs, 
                                     MPI_STATUSES_IGNORE );
    }
  
    /* Leaf nodes */
    else {
        /* 
           Receive all segments from parent in a loop:
           1) post irecv for the first segment
           2) for segments 1 .. num_segments
              - post irecv for the next segment
              - wait on the previous segment to arrive
           3) wait for the last segment
        */
        req_index = 0;
        recv_reqs[req_index] = smpi_mpi_irecv(tmpbuf, count_by_segment, datatype,
                                 tree->tree_prev, COLL_TAG_BCAST,
                                 comm);

        for( segindex = 1; segindex < num_segments; segindex++ ) {
            req_index = req_index ^ 0x1;
            tmpbuf += realsegsize;
            /* post receive for the next segment */
            recv_reqs[req_index] = smpi_mpi_irecv(tmpbuf, count_by_segment, datatype, 
                                     tree->tree_prev, COLL_TAG_BCAST,
                                     comm);
            /* wait on the previous segment */
            smpi_mpi_wait( &recv_reqs[req_index ^ 0x1], 
                                     MPI_STATUS_IGNORE );
        }

        smpi_mpi_wait( &recv_reqs[req_index], MPI_STATUS_IGNORE );
    }

    if( NULL != send_reqs ) free(send_reqs);

    return (MPI_SUCCESS);
}
Example #9
/**
 * Alltoall Bruck
 *
 * Openmpi calls this routine when the message size sent to each rank < 2000 bytes and size < 12
 * FIXME: uh, check smpi_pmpi again, but this routine is called for > 12, not
 * less...
 **/
int smpi_coll_tuned_alltoallv_bruck(void *sendbuf, int *sendcounts, int *senddisps,
                                   MPI_Datatype sendtype, void *recvbuf,
                                   int *recvcounts, int *recvdisps, MPI_Datatype recvtype,
                                   MPI_Comm comm)
{
  int system_tag = 777;
  int i, rank, size, err, count;
  MPI_Aint lb;
  MPI_Aint sendext = 0;
  MPI_Aint recvext = 0;
  MPI_Request *requests;

  // FIXME: check implementation
  rank = smpi_comm_rank(comm);
  size = smpi_comm_size(comm);
  XBT_DEBUG("<%d> algorithm alltoall_bruck() called.", rank);

  smpi_datatype_extent(sendtype, &lb, &sendext);
  smpi_datatype_extent(recvtype, &lb, &recvext);
  /* Local copy from self */
  err =
      smpi_datatype_copy((char *)sendbuf + senddisps[rank] * sendext, 
                         sendcounts[rank], sendtype, 
                         (char *)recvbuf + recvdisps[rank] * recvext,
                         recvcounts[rank], recvtype);
  if (err == MPI_SUCCESS && size > 1) {
    /* Initiate all send/recv to/from others. */
    requests = xbt_new(MPI_Request, 2 * (size - 1));
    count = 0;
    /* Create all receives that will be posted first */
    for (i = 0; i < size; ++i) {
      if (i == rank) {
        XBT_DEBUG("<%d> skip request creation [src = %d, recvcount = %d]",
               rank, i, recvcounts[i]);
        continue;
      }
      requests[count] =
          smpi_irecv_init((char *)recvbuf + recvdisps[i] * recvext, recvcounts[i],
                          recvtype, i, system_tag, comm);
      count++;
    }
    /* Now create all sends  */
    for (i = 0; i < size; ++i) {
      if (i == rank) {
        XBT_DEBUG("<%d> skip request creation [dst = %d, sendcount = %d]",
               rank, i, sendcounts[i]);
        continue;
      }
      requests[count] =
          smpi_isend_init((char *)sendbuf + senddisps[i] * sendext, sendcounts[i],
                          sendtype, i, system_tag, comm);
      count++;
    }
    /* Wait for them all. */
    smpi_mpi_startall(count, requests);
    XBT_DEBUG("<%d> wait for %d requests", rank, count);
    smpi_mpi_waitall(count, requests, MPI_STATUSES_IGNORE);
    xbt_free(requests);
  }
  return MPI_SUCCESS;
}
Example #10
/**
 * Alltoall Bruck
 *
 * Openmpi calls this routine when the message size sent to each rank < 2000 bytes and size < 12
 * FIXME: uh, check smpi_pmpi again, but this routine is called for > 12, not
 * less...
 **/
int smpi_coll_tuned_alltoallv_bruck(void *sendbuf, int *sendcounts, int *senddisps,
                                   MPI_Datatype sendtype, void *recvbuf,
                                   int *recvcounts, int *recvdisps, MPI_Datatype recvtype,
                                   MPI_Comm comm)
{
  int system_tag = COLL_TAG_ALLTOALLV;
  int i, rank, size, err, count;
  MPI_Aint lb;
  MPI_Aint sendext = 0;
  MPI_Aint recvext = 0;
  MPI_Request *requests;

  // FIXME: check implementation
  rank = smpi_comm_rank(comm);
  size = smpi_comm_size(comm);
  XBT_DEBUG("<%d> algorithm alltoall_bruck() called.", rank);

  smpi_datatype_extent(sendtype, &lb, &sendext);
  smpi_datatype_extent(recvtype, &lb, &recvext);
  /* Local copy from self */
  err =
      smpi_datatype_copy((char *)sendbuf + senddisps[rank] * sendext,
                         sendcounts[rank], sendtype,
                         (char *)recvbuf + recvdisps[rank] * recvext,
                         recvcounts[rank], recvtype);
  if (err == MPI_SUCCESS && size > 1) {
    /* Initiate all send/recv to/from others. */

    int bblock = 4;  /* MPIR_PARAM_ALLTOALL_THROTTLE */
    //if (bblock == 0) bblock = comm_size;

    // requests = xbt_new(MPI_Request, 2 * (bblock - 1));
    int ii, ss, dst;
    /* post only bblock isends/irecvs at a time as suggested by Tony Ladd */
    for (ii = 0; ii < size; ii += bblock) {
      requests = xbt_new(MPI_Request, 2 * bblock);

      ss = size - ii < bblock ? size - ii : bblock;
      count = 0;

      /* do the communication -- post ss sends and receives: */
      for (i = 0; i < ss; i++) {
        dst = (rank + i + ii) % size;
        if (dst == rank) {
          XBT_DEBUG("<%d> skip request creation [src = %d, recvcount = %d]",
                    rank, i, recvcounts[dst]);
          continue;
        }

        requests[count] = smpi_mpi_irecv((char *)recvbuf + recvdisps[dst] * recvext, recvcounts[dst],
                                         recvtype, dst, system_tag, comm);
        count++;
      }
      /* Now create all sends */
      for (i = 0; i < ss; i++) {
        dst = (rank - i - ii + size) % size;
        if (dst == rank) {
          XBT_DEBUG("<%d> skip request creation [dst = %d, sendcount = %d]",
                    rank, i, sendcounts[dst]);
          continue;
        }
        requests[count] = smpi_mpi_isend((char *)sendbuf + senddisps[dst] * sendext, sendcounts[dst],
                                         sendtype, dst, system_tag, comm);
        count++;
      }
      /* Wait for them all. */
      //smpi_mpi_startall(count, requests);
      XBT_DEBUG("<%d> wait for %d requests", rank, count);
      smpi_mpi_waitall(count, requests, MPI_STATUSES_IGNORE);
      xbt_free(requests);
    }
  }
  return MPI_SUCCESS;
}
Example #11
    w_mra_info_t       wi;
    xbt_dynar_t    process_list;

    /* Initialize hosts information. */

    config_mra.mra_number_of_workers = 0;

    process_list = MSG_processes_as_dynar ();
    xbt_dynar_foreach (process_list, cursor, process)
    {
	process_name = MSG_process_get_name (process);
	if ( strcmp (process_name, "worker_mra") == 0 )
	    config_mra.mra_number_of_workers++;
    }

    config_mra.workers_mra = xbt_new (msg_host_t, config_mra.mra_number_of_workers);

    mra_wid = 0;
    config_mra.grid_cpu_power = 0.0;
    xbt_dynar_foreach (process_list, cursor, process)
    {
	process_name = MSG_process_get_name (process);
	host = MSG_process_get_host (process);
	if ( strcmp (process_name, "worker_mra") == 0 )
	{
	    config_mra.workers_mra[mra_wid] = host;
	    /* Set the worker ID as its data. */
	    wi = xbt_new (struct mra_w_info_s, 1);
	    wi->mra_wid = mra_wid;
	    MSG_host_set_data (host, (void*)wi);
	    /* Add the worker's cpu power to the grid total. */
Example #12
    w_info_t       wi;
    xbt_dynar_t    process_list;

    /* Initialize hosts information. */

    config.number_of_workers = 0;

    process_list = MSG_processes_as_dynar ();
    xbt_dynar_foreach (process_list, cursor, process)
    {
	process_name = MSG_process_get_name (process);
	if ( strcmp (process_name, "worker") == 0 )
	    config.number_of_workers++;
    }

    config.workers = xbt_new (msg_host_t, config.number_of_workers);

    wid = 0;
    config.grid_cpu_power = 0.0;
    xbt_dynar_foreach (process_list, cursor, process)
    {
	process_name = MSG_process_get_name (process);
	host = MSG_process_get_host (process);
	if ( strcmp (process_name, "worker") == 0 )
	{
	    config.workers[wid] = host;
	    /* Set the worker ID as its data. */
	    wi = xbt_new (struct w_info_s, 1);
	    wi->wid = wid;
	    MSG_host_set_data (host, (void*)wi);
	    /* Add the worker's cpu power to the grid total. */
Example #13
static void *lmm_variable_mallocator_new_f(void)
{
  lmm_variable_t var = xbt_new(s_lmm_variable_t, 1);
  var->cnsts = NULL; /* will be created by realloc */
  return var;
}
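
For context, a hedged sketch of how such a constructor is typically registered with xbt's object-pool API (xbt_mallocator_new takes a pool size plus new/free/reset callbacks); the callback bodies, the pool size, and the init hook below are illustrative assumptions, not the actual surf implementation:

#include "xbt/mallocator.h"
#include "xbt/sysdep.h"

static xbt_mallocator_t variable_mallocator;  /* pool of recycled lmm variables */

static void lmm_variable_mallocator_free_f(void *var)
{
  xbt_free(((lmm_variable_t)var)->cnsts);  /* assumes cnsts was realloc'ed as announced above */
  xbt_free(var);
}

static void lmm_variable_mallocator_reset_f(void *var)
{
  /* nothing to reset here: recycled variables are re-initialized on reuse */
}

static void lmm_variable_pool_init(void)  /* hypothetical init hook */
{
  variable_mallocator = xbt_mallocator_new(65536,
                                           lmm_variable_mallocator_new_f,
                                           lmm_variable_mallocator_free_f,
                                           lmm_variable_mallocator_reset_f);
}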
Example #14
    w_mrsg_info_t       wi;
    xbt_dynar_t    process_list;

    /* Initialize hosts information. */

    config_mrsg.mrsg_number_of_workers = 0;

    process_list = MSG_processes_as_dynar ();
    xbt_dynar_foreach (process_list, cursor, process)
    {
	process_name = MSG_process_get_name (process);
	if ( strcmp (process_name, "worker_mrsg") == 0 )
	    config_mrsg.mrsg_number_of_workers++;
    }

    config_mrsg.workers_mrsg = xbt_new (msg_host_t, config_mrsg.mrsg_number_of_workers);

    mrsg_wid = 0;
    config_mrsg.grid_cpu_power = 0.0;
    xbt_dynar_foreach (process_list, cursor, process)
    {
	process_name = MSG_process_get_name (process);
	host = MSG_process_get_host (process);
	if ( strcmp (process_name, "worker_mrsg") == 0 )
	{
	    config_mrsg.workers_mrsg[mrsg_wid] = host;
	    /* Set the worker ID as its data. */
	    wi = xbt_new (struct mrsg_w_info_s, 1);
	    wi->mrsg_wid = mrsg_wid;
	    MSG_host_set_data (host, (void*)wi);
	    /* Add the worker's cpu power to the grid total. */
Example #15
task_t new_task(task_type_t e_type, msched_args_t ps_args, task_t p_ancestor, unsigned int ui_a1, unsigned int ui_a2, unsigned int ui_a3, unsigned int ui_a4)
{
  task_t p_task = (task_t)xbt_new(s_task_t,1);
  p_task->uc_done = 0;
  p_task->p_proc_map = NULL;
  p_task->e_type = e_type;
  p_task->ui_tile_size = ps_args->ui_tile_size;
  p_task->a_ancestors = xbt_dynar_new(sizeof(task_t),NULL);
  
  switch (e_type) {
    case F:
      p_task->task.F.ui_i = ui_a1;
      p_task->task.F.ui_j = ui_a2;
      if (ps_args->uc_coarse) {
        p_task->f_unit_cost = 0;  
      }
      else {
        p_task->f_unit_cost = 4;        
      }

      sprintf(p_task->name,"F(%u,%u)",ui_a1,ui_a2);
      xbt_fifo_push(ps_args->a_tile_updates[p_task->task.F.ui_i*ps_args->ui_q+p_task->task.F.ui_j], p_task);
      break;
    case H:
      p_task->task.H.ui_i = ui_a1;
      p_task->task.H.ui_j = ui_a2;
      p_task->task.H.ui_k = ui_a3;
      if (ps_args->uc_coarse) {
        p_task->f_unit_cost = 0;
      } else {
        p_task->f_unit_cost = 6;
      }
      sprintf(p_task->name,"H(%u,%u,%u)",ui_a1,ui_a2,ui_a3);
      /*place the update in the tile fifo*/
      xbt_fifo_push(ps_args->a_tile_updates[p_task->task.H.ui_i*ps_args->ui_q+p_task->task.H.ui_k], p_task);      
      xbt_dynar_push_as(p_task->a_ancestors,task_t,p_ancestor);
      break;
    case Z:
      p_task->task.Z.ui_i = ui_a1;
      p_task->task.Z.ui_ii = ui_a2;
      p_task->task.Z.ui_j = ui_a3;
      if (ps_args->uc_coarse) {
        p_task->f_unit_cost = 1;
      } else {
        p_task->f_unit_cost = 2;
      }
      xbt_fifo_push(ps_args->a_tile_updates[p_task->task.Z.ui_i*ps_args->ui_q+p_task->task.Z.ui_j], p_task);
      xbt_fifo_push(ps_args->a_tile_updates[p_task->task.Z.ui_ii*ps_args->ui_q+p_task->task.Z.ui_j], p_task);
      sprintf(p_task->name,"Z(%u,%u,%u)",ui_a1,ui_a2,ui_a3);
      break;
    case ZS:
      p_task->task.Z.ui_i = ui_a1;
      p_task->task.Z.ui_ii = ui_a2;
      p_task->task.Z.ui_j = ui_a3;
      if (ps_args->uc_coarse) {
        p_task->f_unit_cost = 1;
      } else {
        p_task->f_unit_cost = 6;
      }
      /*place the update in the tile fifo*/
      xbt_fifo_push(ps_args->a_tile_updates[p_task->task.Z.ui_i*ps_args->ui_q+p_task->task.Z.ui_j], p_task);
      xbt_fifo_push(ps_args->a_tile_updates[p_task->task.Z.ui_ii*ps_args->ui_q+p_task->task.Z.ui_j], p_task);
      sprintf(p_task->name,"ZS(%u,%u,%u)",ui_a1,ui_a2,ui_a3);      
      break;      
    case V:
      p_task->task.V.ui_i = ui_a1;
      p_task->task.V.ui_ii = ui_a2;
      p_task->task.V.ui_j = ui_a3;
      p_task->task.V.ui_k = ui_a4;      
      if (ps_args->uc_coarse) {
        if(ps_args->fptr_handle_finish!=NULL){
          p_task->f_unit_cost = 0;
        }
        else{
          p_task->f_unit_cost = 1;
        }
      } else {
        p_task->f_unit_cost = 6;
      }
      xbt_dynar_push_as(p_task->a_ancestors,task_t,p_ancestor);
      /*place the update in the tile fifo*/
      xbt_fifo_push(ps_args->a_tile_updates[p_task->task.V.ui_i*ps_args->ui_q+p_task->task.V.ui_k], p_task);
      xbt_fifo_push(ps_args->a_tile_updates[p_task->task.V.ui_ii*ps_args->ui_q+p_task->task.V.ui_k], p_task);
      sprintf(p_task->name,"V(%u,%u,%u,%u)",ui_a1,ui_a2,ui_a3,ui_a4);
      break;
    case VS:
      p_task->task.V.ui_i = ui_a1;
      p_task->task.V.ui_ii = ui_a2;
      p_task->task.V.ui_j = ui_a3;
      p_task->task.V.ui_k = ui_a4;     
      if (ps_args->uc_coarse) {
        if(ps_args->fptr_handle_finish!=NULL){
          p_task->f_unit_cost = 0;
        }
        else{
          p_task->f_unit_cost = 1;
        }
      } else {
        p_task->f_unit_cost = 12;
      }
      xbt_dynar_push_as(p_task->a_ancestors,task_t,p_ancestor);
      /*place the update in the tile fifo*/
      xbt_fifo_push(ps_args->a_tile_updates[p_task->task.V.ui_i*ps_args->ui_q+p_task->task.V.ui_k], p_task);
      xbt_fifo_push(ps_args->a_tile_updates[p_task->task.V.ui_ii*ps_args->ui_q+p_task->task.V.ui_k], p_task);
      sprintf(p_task->name,"VS(%u,%u,%u,%u)",ui_a1,ui_a2,ui_a3,ui_a4);
      break;      
    default:
      exit(-1);
      break;
  }
  
  operation_cnt+=ceil(p_task->f_unit_cost);
  xbt_dynar_push_as(ps_args->a_ET, task_t,p_task);
  xbt_fifo_push(ps_args->a_general_workqueue, p_task);
  

  return p_task;
}
Example #16
  for (int i = 0; i < test_amount; i++) {
    if (more_info)
      XBT_INFO("%03d (%02d|%02d|%02d|%02d|%02d|%02d|%02d|%02d|%02d)",
             test_amount - i, id, id, id, id, id, id, id, id, id);
    else
      XBT_INFO("XXX (XX|XX|XX|XX|XX|XX|XX|XX|XX)");
  }
  return NULL;
}

static int crasher()
{
  /* initializations of the philosopher mechanisms */
  id = xbt_new0(int, crasher_amount);
  xbt_os_thread_t* crashers = xbt_new(xbt_os_thread_t, crasher_amount);

  for (int i = 0; i < crasher_amount; i++)
    id[i] = i;

  /* spawn threads */
  for (int i = 0; i < crasher_amount; i++) {
    char name[16];
    snprintf(name, sizeof name, "thread %d", i);
    crashers[i] = xbt_os_thread_create(name, &crasher_thread, &id[i], NULL );
  }

  /* wait for them */
  for (int i = 0; i < crasher_amount; i++)
    xbt_os_thread_join(crashers[i],NULL);
Example #17
/* Master Process */
int master(int argc, char *argv[])
{
    char * key;
    struct HdmsgHost *hdmsg_host;
    xbt_dict_cursor_t cursor = NULL;
    
    int i = 0;
    
    long remaining_inits = 0;
    long remaining_mappers = 0;
    long remaining_shufflers = 0;
    long remaining_reducers = 0;
    long expected_messages = 0;
    
    msg_comm_t res_irecv;
    msg_task_t task_com;
    msg_task_t *tasks = xbt_new(msg_task_t, number_of_workers);
    xbt_dynar_t comms = xbt_dynar_new(sizeof(msg_comm_t), NULL);
    
    XBT_INFO("INITIALIZATION BEGIN");
    
    // Initialize processes (mappers, shufflers, and reducers) on each host
    xbt_dict_foreach(hosts, cursor, key, hdmsg_host)
    {
        if (hdmsg_host->is_worker)
        {
            MSG_process_create("Init", initializeProcs, NULL, hdmsg_host->host);
            
            tasks[remaining_inits] = NULL;
            res_irecv = MSG_task_irecv(&tasks[remaining_inits], "master");
            xbt_dynar_push_as(comms, msg_comm_t, res_irecv);
            remaining_inits++;
        }
    }
    
    while (!xbt_dynar_is_empty(comms))
    {
        xbt_dynar_remove_at(comms, MSG_comm_waitany(comms), &res_irecv);
        task_com = MSG_comm_get_task(res_irecv);
        
        if (!strcmp(MSG_task_get_name(task_com), "init_exit"))
        {
            msg_host_t h = MSG_task_get_source(task_com);
            MSG_task_destroy(task_com);
            
            const char *host_name = MSG_host_get_name(h);
            struct HdmsgHost *hdmsg_host = xbt_dict_get(hosts, host_name);
            
            remaining_mappers += get_mapper_count(hdmsg_host);
            remaining_shufflers += get_shuffler_count(hdmsg_host);
            remaining_reducers += get_reducer_count(hdmsg_host);
            
            remaining_inits--;
            
            if (remaining_inits == 0)
            {
                XBT_INFO("INITIALIZATION COMPLETE");
                
                // Add an extra message to account for the message sent when the shuffle phase begins
                expected_messages = 1 + remaining_mappers + remaining_shufflers + remaining_reducers;
                
                free(tasks);
                tasks = xbt_new(msg_task_t, expected_messages);
                
                for (i = 0; i < expected_messages; i++)
                {
                    tasks[i] = NULL;
                    res_irecv = MSG_task_irecv(&tasks[i], "master");
                    xbt_dynar_push_as(comms, msg_comm_t, res_irecv);
                }
                
                XBT_INFO("MAP PHASE BEGIN");
                
                // Activate Mappers
                xbt_dict_foreach(hosts, cursor, key, hdmsg_host)
                {
                    activate_mappers(hdmsg_host);
                }
            }