Example #1
static void replace_lat_ns3(char ** lat)
{
  char *temp = xbt_strdup(*lat);
  xbt_free(*lat);
  *lat = bprintf("%fs",atof(temp));
  xbt_free(temp);
}
Example #2
static void vivaldi_get_route_and_latency(
    AS_t rc, sg_routing_edge_t src_p, sg_routing_edge_t dst_p,
    sg_platf_route_cbarg_t route, double *lat)
{
  s_surf_parsing_link_up_down_t info;

  XBT_DEBUG("vivaldi_get_route_and_latency from '%s'[%d] '%s'[%d]",src_p->name,src_p->id,dst_p->name,dst_p->id);
  char *src = (char*)src_p->name;
  char *dst = (char*)dst_p->name;

  if(src_p->rc_type == SURF_NETWORK_ELEMENT_AS) {
    char *rp_src = ROUTER_PEER(src);
    char *rp_dst = ROUTER_PEER(dst);
    route->gw_src = xbt_lib_get_or_null(as_router_lib, rp_src,
                                        ROUTING_ASR_LEVEL);
    route->gw_dst = xbt_lib_get_or_null(as_router_lib, rp_dst,
                                        ROUTING_ASR_LEVEL);
    xbt_free(rp_src);
    xbt_free(rp_dst);
  }

  double euclidean_dist;
  xbt_dynar_t src_ctn, dst_ctn;
  char *tmp_src_name, *tmp_dst_name;

  if(src_p->rc_type == SURF_NETWORK_ELEMENT_HOST){
    tmp_src_name = HOST_PEER(src);

    if(rc->link_up_down_list){
      info = xbt_dynar_get_as(rc->link_up_down_list,src_p->id,s_surf_parsing_link_up_down_t);
      if(info.link_up) { // link up
        xbt_dynar_push_as(route->link_list,void*,info.link_up);
        if (lat)
          *lat += surf_network_model->extension.network.get_link_latency(info.link_up);
      }
Example #3
int crasher(int argc, char *argv[])
{
  int i;
  xbt_os_thread_t *crashers;

  xbt_init(&argc, argv);

  /* initializations of the philosopher mechanisms */
  id = xbt_new0(int, crasher_amount);
  crashers = xbt_new(xbt_os_thread_t, crasher_amount);

  for (i = 0; i < crasher_amount; i++)
    id[i] = i;

  /* spawn threads */
  for (i = 0; i < crasher_amount; i++) {
    char *name = bprintf("thread %d", i);
    crashers[i] =
        xbt_os_thread_create(name, &crasher_thread, &id[i], NULL );
    free(name);
  }

  /* wait for them */
  for (i = 0; i < crasher_amount; i++)
    xbt_os_thread_join(crashers[i],NULL);

  xbt_free(crashers);
  xbt_free(id);

  return 0;
}
Example #4
void AsVivaldi::getRouteAndLatency(RoutingEdge *src, RoutingEdge *dst, sg_platf_route_cbarg_t route, double *lat)
{
  s_surf_parsing_link_up_down_t info;

  XBT_DEBUG("vivaldi_get_route_and_latency from '%s'[%d] '%s'[%d]",
		  src->getName(), src->getId(), dst->getName(), dst->getId());

  if(src->getRcType() == SURF_NETWORK_ELEMENT_AS) {
    char *src_name = ROUTER_PEER(src->getName());
    char *dst_name = ROUTER_PEER(dst->getName());
    route->gw_src = (sg_routing_edge_t) xbt_lib_get_or_null(as_router_lib, src_name, ROUTING_ASR_LEVEL);
    route->gw_dst = (sg_routing_edge_t) xbt_lib_get_or_null(as_router_lib, dst_name, ROUTING_ASR_LEVEL);
    xbt_free(src_name);
    xbt_free(dst_name);
  }

  double euclidean_dist;
  xbt_dynar_t src_ctn, dst_ctn;
  char *tmp_src_name, *tmp_dst_name;

  if(src->getRcType() == SURF_NETWORK_ELEMENT_HOST){
    tmp_src_name = HOST_PEER(src->getName());

    if(p_linkUpDownList){
      info = xbt_dynar_get_as(p_linkUpDownList, src->getId(), s_surf_parsing_link_up_down_t);
      if(info.link_up) { // link up
        xbt_dynar_push_as(route->link_list, void*, info.link_up);
        if (lat)
          *lat += static_cast<Link*>(info.link_up)->getLatency();
      }
    }
Example #5
/**
 * \brief Destroys a task.
 *
 * The user data (if any) should have been destroyed first.
 *
 * \param task the task you want to destroy
 * \see SD_task_create()
 */
void SD_task_destroy(SD_task_t task)
{
  XBT_DEBUG("Destroying task %s...", SD_task_get_name(task));

  /* First Remove all dependencies associated with the task. */
  while (!task->predecessors->empty())
    SD_task_dependency_remove(*(task->predecessors->begin()), task);
  while (!task->inputs->empty())
    SD_task_dependency_remove(*(task->inputs->begin()), task);
  while (!task->successors->empty())
    SD_task_dependency_remove(task, *(task->successors->begin()));
  while (!task->outputs->empty())
   SD_task_dependency_remove(task, *(task->outputs->begin()));

  if (task->state == SD_SCHEDULED || task->state == SD_RUNNABLE)
    __SD_task_destroy_scheduling_data(task);

  int idx = xbt_dynar_search_or_negative(sd_global->return_set, &task);
  if (idx >=0) {
    xbt_dynar_remove_at(sd_global->return_set, idx, nullptr);
  }

  xbt_free(task->name);

  if (task->surf_action != nullptr)
    task->surf_action->unref();

  xbt_free(task->host_list);
  xbt_free(task->bytes_amount);
  xbt_free(task->flops_amount);

  xbt_mallocator_release(sd_global->task_mallocator,task);

  XBT_DEBUG("Task destroyed.");
}
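The destructor above assumes the usual SimDag lifecycle. As a hedged, minimal sketch (not one of the scraped examples; the platform file and task name are placeholders), a task created with SD_task_create() is released with SD_task_destroy() once it is no longer needed, and any user data attached to it must be freed beforehand, as the doc comment notes:

int main(int argc, char **argv)
{
  SD_init(&argc, argv);
  SD_create_environment("platform.xml");   /* placeholder platform description */

  SD_task_t task = SD_task_create("my_task", NULL, 1e9);  /* 1 Gflop of work, no user data */
  /* ... schedule the task and run the simulation here ... */

  SD_task_destroy(task);   /* also drops any remaining dependencies, as shown above */
  return 0;
}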
Example #6
/**
 * \brief Schedules a task
 *
 * The task state must be #SD_NOT_SCHEDULED.
 * Once scheduled, a task is executed as soon as possible in \see SD_simulate, i.e. when its dependencies are satisfied.
 *
 * \param task the task you want to schedule
 * \param host_count number of hosts on which the task will be executed
 * \param workstation_list the hosts on which the task will be executed
 * \param flops_amount computation amount for each host (i.e., an array of host_count doubles)
 * \param bytes_amount communication amount between each pair of hosts (i.e., a matrix of host_count*host_count doubles)
 * \param rate task execution speed rate
 * \see SD_task_unschedule()
 */
void SD_task_schedule(SD_task_t task, int host_count, const sg_host_t * workstation_list,
                      const double *flops_amount, const double *bytes_amount, double rate)
{
  xbt_assert(host_count > 0, "workstation_nb must be positive");

  task->host_count = host_count;
  task->rate = rate;

  if (flops_amount) {
    task->flops_amount = static_cast<double*>(xbt_realloc(task->flops_amount, sizeof(double) * host_count));
    memcpy(task->flops_amount, flops_amount, sizeof(double) * host_count);
  } else {
    xbt_free(task->flops_amount);
    task->flops_amount = nullptr;
  }

  int communication_nb = host_count * host_count;
  if (bytes_amount) {
    task->bytes_amount = static_cast<double*>(xbt_realloc(task->bytes_amount, sizeof(double) * communication_nb));
    memcpy(task->bytes_amount, bytes_amount, sizeof(double) * communication_nb);
  } else {
    xbt_free(task->bytes_amount);
    task->bytes_amount = nullptr;
  }

  task->host_list =  static_cast<sg_host_t*>(xbt_realloc(task->host_list, sizeof(sg_host_t) * host_count));
  memcpy(task->host_list, workstation_list, sizeof(sg_host_t) * host_count);

  SD_task_do_schedule(task);
}
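A hedged sketch of a call to the function above, scheduling an already created task on two hosts. The host names come from SimGrid's sample platforms and are used here only as placeholders, and the negative rate follows the convention used in SimDag examples for "no rate cap":

sg_host_t hosts[2]  = { sg_host_by_name("Tremblay"), sg_host_by_name("Jupiter") };
double flops[2]     = { 1e9, 1e9 };        /* per-host computation amounts */
double bytes[2 * 2] = { 0.0, 1e6,          /* host0 -> host1 transfer */
                        0.0, 0.0 };        /* row-major host_count x host_count matrix */

SD_task_schedule(task, 2, hosts, flops, bytes, -1.0);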
Example #7
File: dict.c    Project: apargupta/simgrid
/**
 * \brief Destructor
 * \param dict the dictionary to be freed
 *
 * Frees a dictionary with all the data
 */
void xbt_dict_free(xbt_dict_t * dict)
{
    int i;
    xbt_dictelm_t current, previous;
    int table_size;
    xbt_dictelm_t *table;

    //  if ( *dict )  xbt_dict_dump_sizes(*dict);

    if (dict != NULL && *dict != NULL) {
        table_size = (*dict)->table_size;
        table = (*dict)->table;
        /* Warning: the size of the table is 'table_size+1'...
         * This is because table_size is used as a binary mask in xbt_dict_rehash */
        for (i = 0; (*dict)->count && i <= table_size; i++) {
            current = table[i];
            while (current != NULL) {
                previous = current;
                current = current->next;
                xbt_dictelm_free(*dict, previous);
                (*dict)->count--;
            }
        }
        xbt_free(table);
        xbt_free(*dict);
        *dict = NULL;
    }
}
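As a minimal usage sketch of the dictionary this destructor tears down (key and value are placeholders; the free function given at creation releases every value):

xbt_dict_t dict = xbt_dict_new_homogeneous(xbt_free_f);   /* every value freed with xbt_free */

xbt_dict_set(dict, "host", xbt_strdup("Tremblay"), NULL); /* NULL: use the homogeneous free_f */
char *value = (char *) xbt_dict_get_or_null(dict, "host");
printf("host -> %s\n", value ? value : "(none)");

xbt_dict_free(&dict);   /* frees the table and every element, then resets dict to NULL */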
Example #8
static void receiver(std::vector<std::string> args)
{
  int flow_amount = std::stoi(args.at(0));

  XBT_INFO("Receiving %d flows ...", flow_amount);

  simgrid::s4u::MailboxPtr mailbox = simgrid::s4u::Mailbox::by_name(std::string("message"));

  if (flow_amount == 1) {
    void* res = mailbox->get();
    xbt_free(res);
  } else {
    void* data[flow_amount];

    // Start all comms in parallel, and wait for their completion in one shot
    std::vector<simgrid::s4u::CommPtr> comms;
    for (int i = 0; i < flow_amount; i++)
      comms.push_back(mailbox->get_async(&data[i]));

    simgrid::s4u::Comm::wait_all(&comms);
    for (int i = 0; i < flow_amount; i++)
      xbt_free(data[i]);
  }
  XBT_INFO("receiver done.");
}
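A hedged sketch of a matching sender for the receiver above (not part of the scraped examples): one put() per flow, each payload heap-allocated so the receiver can release it with xbt_free(); the simulated size of 1000000 bytes is an arbitrary placeholder.

static void sender(std::vector<std::string> args)
{
  int flow_amount = std::stoi(args.at(0));
  simgrid::s4u::MailboxPtr mailbox = simgrid::s4u::Mailbox::by_name(std::string("message"));

  for (int i = 0; i < flow_amount; i++)
    mailbox->put(xbt_strdup("payload"), 1000000);   /* ownership passes to the receiver */
}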
Example #9
static xbt_dynar_t parse_factor(const char *smpi_coef_string)
{
  char *value = NULL;
  unsigned int iter = 0;
  s_smpi_factor_t fact;
  xbt_dynar_t smpi_factor, radical_elements, radical_elements2 = NULL;

  smpi_factor = xbt_dynar_new(sizeof(s_smpi_factor_t), NULL);
  radical_elements = xbt_str_split(smpi_coef_string, ";");
  xbt_dynar_foreach(radical_elements, iter, value) {

    radical_elements2 = xbt_str_split(value, ":");
    surf_parse_assert(xbt_dynar_length(radical_elements2) == 2,
        "Malformed radical '%s' for smpi factor. I was expecting something like 'a:b'", value);

    char *errmsg = bprintf("Invalid factor in chunk #%d: %%s", iter+1);
    fact.factor = xbt_str_parse_int(xbt_dynar_get_as(radical_elements2, 0, char *), errmsg);
    xbt_free(errmsg);
    errmsg = bprintf("Invalid factor value in chunk #%d: %%s", iter+1);
    fact.value = xbt_str_parse_double(xbt_dynar_get_as(radical_elements2, 1, char *), errmsg);
    xbt_free(errmsg);

    xbt_dynar_push_as(smpi_factor, s_smpi_factor_t, fact);
    XBT_DEBUG("smpi_factor:\t%ld : %f", fact.factor, fact.value);
    xbt_dynar_free(&radical_elements2);
  }
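Per the assertion above, the input is a ';'-separated list of "factor:value" chunks. A hypothetical call illustrating the accepted format (the numbers below are made up):

xbt_dynar_t factors = parse_factor("65472:0.94;15424:1.35;9376:0.58");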
Example #10
void AsCluster::create_links_for_node(sg_platf_cluster_cbarg_t cluster, int id, int , int position){
  s_sg_platf_link_cbarg_t link = SG_PLATF_LINK_INITIALIZER;
  s_surf_parsing_link_up_down_t info;
  char* link_id = bprintf("%s_link_%d", cluster->id, id);

  memset(&link, 0, sizeof(link));
  link.id = link_id;
  link.bandwidth = cluster->bw;
  link.latency = cluster->lat;
  link.policy = cluster->sharing_policy;
  sg_platf_new_link(&link);

  if (link.policy == SURF_LINK_FULLDUPLEX) {
    char *tmp_link = bprintf("%s_UP", link_id);
    info.link_up = sg_link_by_name(tmp_link);
    xbt_free(tmp_link);
    tmp_link = bprintf("%s_DOWN", link_id);
    info.link_down = sg_link_by_name(tmp_link);
    xbt_free(tmp_link);
  } else {
    info.link_up = sg_link_by_name(link_id);
    info.link_down = info.link_up;
  }
  xbt_dynar_set(upDownLinks, position, &info);
  xbt_free(link_id);
}
Example #11
void print_TICreateContainer(paje_event_t event)
{
  //if we are in the mode with only one file
  static FILE *temp = nullptr;

  if (tracing_files == nullptr) {
    tracing_files = xbt_dict_new_homogeneous(nullptr);
    //generate unique run id with time
    prefix = xbt_os_time();
  }

  if (!xbt_cfg_get_boolean("tracing/smpi/format/ti-one-file") || temp == nullptr) {
    char *folder_name = bprintf("%s_files", TRACE_get_filename());
    char *filename = bprintf("%s/%f_%s.txt", folder_name, prefix, ((createContainer_t) event->data)->container->name);
#ifdef WIN32
    _mkdir(folder_name);
#else
    mkdir(folder_name, S_IRWXU | S_IRWXG | S_IRWXO);
#endif
    temp = fopen(filename, "w");
    xbt_assert(temp, "Tracefile %s could not be opened for writing: %s", filename, strerror(errno));
    fprintf(tracing_file, "%s\n", filename);

    xbt_free(folder_name);
    xbt_free(filename);
  }

  xbt_dict_set(tracing_files, ((createContainer_t) event->data)->container->name, (void *) temp, nullptr);
}
Example #12
void smpi_comm_copy_buffer_callback(smx_synchro_t synchro, void *buff, size_t buff_size)
{
  XBT_DEBUG("Copy the data over");
  void* tmpbuff=buff;
  simgrid::simix::Comm *comm = dynamic_cast<simgrid::simix::Comm*>(synchro);

  if((smpi_privatize_global_variables) && ((char*)buff >= smpi_start_data_exe)
      && ((char*)buff < smpi_start_data_exe + smpi_size_data_exe )
    ){
       XBT_DEBUG("Privatization : We are copying from a zone inside global memory... Saving data to temp buffer !");


       smpi_switch_data_segment(((smpi_process_data_t)(((simdata_process_t)SIMIX_process_get_data(comm->src_proc))->data))->index);
       tmpbuff = (void*)xbt_malloc(buff_size);
       memcpy(tmpbuff, buff, buff_size);
  }

  if((smpi_privatize_global_variables) && ((char*)comm->dst_buff >= smpi_start_data_exe)
      && ((char*)comm->dst_buff < smpi_start_data_exe + smpi_size_data_exe )){
       XBT_DEBUG("Privatization : We are copying to a zone inside global memory - Switch data segment");
       smpi_switch_data_segment(((smpi_process_data_t)(((simdata_process_t)SIMIX_process_get_data(comm->dst_proc))->data))->index);
  }

  memcpy(comm->dst_buff, tmpbuff, buff_size);
  if (comm->detached) {
    // if this is a detached send, the source buffer was duplicated by SMPI
    // sender to make the original buffer available to the application ASAP
    xbt_free(buff);
    //It seems that the request is used after the call there this should be free somewhere else but where???
    //xbt_free(comm->comm.src_data);// inside SMPI the request is kept inside the user data and should be free
    comm->src_buff = NULL;
  }

  if(tmpbuff!=buff)xbt_free(tmpbuff);
}
Example #13
/** \ingroup m_task_management
 * \brief Destroy a #msg_task_t.
 *
 * Destructor for #msg_task_t. Note that you should free user data, if any, \b
 * before calling this function.
 *
 * Only the process that owns the task can destroy it.
 * The owner changes after a successful send.
 * If a task is successfully sent, the receiver becomes the owner and is
 * supposed to destroy it. The sender should not use it anymore.
 * If the task failed to be sent, the sender remains the owner of the task.
 */
msg_error_t MSG_task_destroy(msg_task_t task)
{
  smx_synchro_t action = NULL;
  xbt_assert((task != NULL), "Invalid parameter");

  if (task->simdata->isused) {
    /* the task is being sent or executed: cancel it first */
    MSG_task_cancel(task);
  }
  TRACE_msg_task_destroy(task);

  xbt_free(task->name);

  action = task->simdata->compute;
  if (action)
    simcall_process_execution_destroy(action);

  /* parallel tasks only */
  xbt_free(task->simdata->host_list);

  xbt_dict_free(&task->simdata->affinity_mask_db);

  /* free main structures */
  xbt_free(task->simdata);
  xbt_free(task);

  return MSG_OK;
}
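A minimal hedged sketch of the create/execute/destroy cycle this destructor completes (the task name and amounts are placeholders), run from the process that owns the task:

msg_task_t task = MSG_task_create("work", 1e9 /*flops*/, 0 /*bytes*/, NULL /*user data*/);
MSG_task_execute(task);
MSG_task_destroy(task);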
Example #14
static void replace_bdw_ns3(char ** bdw)
{
  char *temp = xbt_strdup(*bdw);
  xbt_free(*bdw);
  *bdw = bprintf("%fBps",atof(temp));
  xbt_free(temp);

}
Example #15
static void route_cache_elem_free(void *e)
{
  route_cache_element_t elm = (route_cache_element_t) e;
  if (elm) {
    xbt_free(elm->pred_arr);
    xbt_free(elm);
  }
}
Example #16
void PJ_value_free (val_t value)
{
  XBT_DEBUG("free value %s, child of %s", value->name, value->father->name);
  xbt_free(((val_t)value)->name);
  xbt_free(((val_t)value)->color);
  xbt_free(((val_t)value)->id);
  xbt_free(value);
}
Example #17
void connection_free(void *data)
{
  connection_t co = (connection_t) data;
  xbt_free(co->bitfield);
  xbt_free(co->mailbox);
  xbt_free(co);

}
Example #18
int master_fun(int argc, char *argv[])
{
  msg_vm_t vm;
  unsigned int i;

  xbt_dynar_t worker_pms = MSG_process_get_data(MSG_process_self());
  int nb_workers = xbt_dynar_length(worker_pms);

  xbt_dynar_t vms = xbt_dynar_new(sizeof(msg_vm_t), NULL);


  /* Launch VMs and worker processes. One VM per PM, and one worker process per VM. */

  XBT_INFO("# Launch %d VMs", nb_workers);
  for (i = 0; i< nb_workers; i++) {
    char *vm_name = bprintf("VM%02d", i);
    char *pr_name = bprintf("WRK%02d", i);

    msg_host_t pm = xbt_dynar_get_as(worker_pms, i, msg_host_t);

    XBT_INFO("create %s on PM(%s)", vm_name, MSG_host_get_name(pm));
    msg_vm_t vm = MSG_vm_create_core(pm, vm_name);

    s_vm_params_t params;
    memset(&params, 0, sizeof(params));
    params.ramsize = 1L * 1024 * 1024 * 1024; // 1Gbytes
    MSG_host_set_params(vm, &params);

    MSG_vm_start(vm);
    xbt_dynar_push(vms, &vm);

    XBT_INFO("put a process (%s) on %s", pr_name, vm_name);
    MSG_process_create(pr_name, worker_fun, NULL, vm);

    xbt_free(vm_name);
    xbt_free(pr_name);
  }


  /* Send a bunch of work to every one */
  XBT_INFO("# Send a task to %d worker process", nb_workers);
  send_tasks(nb_workers);

  XBT_INFO("# Suspend all VMs");
  xbt_dynar_foreach(vms, i, vm) {
    const char *vm_name = MSG_host_get_name(vm);
    XBT_INFO("suspend %s", vm_name);
    MSG_vm_suspend(vm);
  }

  XBT_INFO("# Wait a while");
  MSG_process_sleep(2);

  XBT_INFO("# Resume all VMs");
  xbt_dynar_foreach(vms, i, vm) {
    MSG_vm_resume(vm);
  }
Example #19
/* Frees the memory used by a task and destroy it */
static void task_free(void* task)
{
  if(task != NULL){
    s_task_data_t* data = (s_task_data_t*)MSG_task_get_data(task);
    xbt_free(data->state);
    xbt_free(data);
    MSG_task_destroy(task);
  }
}
Example #20
void free_memory_map(memory_map_t map){

  int i;
  for(i=0; i< map->mapsize; i++){
    xbt_free(map->regions[i].pathname);
  }
  xbt_free(map->regions);
  xbt_free(map);
}
Example #21
File: dynar.cpp    Project: mpoquet/simgrid
/** @brief Destructor of the structure, not touching the content
 *
 * \param dynar poor victim
 *
 * Kills a dynar BUT NOT its content. I.e., the array is freed, but the content is not touched (the \a free_f function
 * is not used)
 */
void xbt_dynar_free_container(xbt_dynar_t* dynar)
{
  if (dynar && *dynar) {
    xbt_dynar_t d = *dynar;
    xbt_free(d->data);
    xbt_free(d);
    *dynar = nullptr;
  }
}
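A short sketch contrasting this destructor with xbt_dynar_free(): since no free_f is registered, the container is dropped but the pushed string stays owned by the caller (names below are placeholders):

xbt_dynar_t names = xbt_dynar_new(sizeof(char *), NULL);  /* no free_f: content not owned */
char *s = xbt_strdup("hello");
xbt_dynar_push_as(names, char *, s);

xbt_dynar_free_container(&names);  /* frees the array only; 's' is still valid here */
xbt_free(s);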
Example #22
File: peer.c    Project: R7R8/simgrid
void peer_delete(peer_t p)
{
  xbt_dynar_free(&p->pending_recvs);
  xbt_dynar_free(&p->pending_sends);
  xbt_free(p->me);
  xbt_free(p->prev);
  xbt_free(p->next);

  xbt_free(p);
}
Example #23
/* Destroys the data memorized by SD_task_schedule. Task state must be SD_SCHEDULED or SD_RUNNABLE. */
static void __SD_task_destroy_scheduling_data(SD_task_t task)
{
  if (task->state != SD_SCHEDULED && task->state != SD_RUNNABLE)
    THROWF(arg_error, 0, "Task '%s' must be SD_SCHEDULED or SD_RUNNABLE", SD_task_get_name(task));

  xbt_free(task->flops_amount);
  xbt_free(task->bytes_amount);
  task->flops_amount = nullptr;
  task->bytes_amount = nullptr;
}
Example #24
static void action_reduce(const char *const *action)
{
  int i;
  char *reduce_identifier;
  char mailbox[80];
  double comm_size = parse_double(action[2]);
  double comp_size = parse_double(action[3]);
  msg_task_t comp_task = NULL;
  const char *process_name;
  double clock = MSG_get_clock();

  process_globals_t counters =
      (process_globals_t) MSG_process_get_data(MSG_process_self());

  xbt_assert(communicator_size, "Size of Communicator is not defined, "
             "can't use collective operations");

  process_name = MSG_process_get_name(MSG_process_self());

  reduce_identifier = bprintf("reduce_%d", counters->reduce_counter++);

  if (!strcmp(process_name, "p0")) {
    XBT_DEBUG("%s: %s is the Root", reduce_identifier, process_name);

    msg_comm_t *comms = xbt_new0(msg_comm_t, communicator_size - 1);
    msg_task_t *tasks = xbt_new0(msg_task_t, communicator_size - 1);
    for (i = 1; i < communicator_size; i++) {
      sprintf(mailbox, "%s_p%d_p0", reduce_identifier, i);
      comms[i - 1] = MSG_task_irecv(&(tasks[i - 1]), mailbox);
    }
    MSG_comm_waitall(comms, communicator_size - 1, -1);
    for (i = 1; i < communicator_size; i++) {
      MSG_comm_destroy(comms[i - 1]);
      MSG_task_destroy(tasks[i - 1]);
    }
    xbt_free(comms);
    xbt_free(tasks);

    comp_task = MSG_task_create("reduce_comp", comp_size, 0, NULL);
    XBT_DEBUG("%s: computing 'reduce_comp'", reduce_identifier);
    MSG_task_execute(comp_task);
    MSG_task_destroy(comp_task);
    XBT_DEBUG("%s: computed", reduce_identifier);

  } else {
    XBT_DEBUG("%s: %s sends", reduce_identifier, process_name);
    sprintf(mailbox, "%s_%s_p0", reduce_identifier, process_name);
    XBT_DEBUG("put on %s", mailbox);
    MSG_task_send(MSG_task_create(reduce_identifier, 0, comm_size, NULL),
                  mailbox);
  }

  log_action(action, MSG_get_clock() - clock);
  xbt_free(reduce_identifier);
}
Example #25
  xbt_dynar_foreach(vms, i, vm) {
    unsigned int index = i + xbt_dynar_length(vms);
    char *vm_name = bprintf("VM%02d", i);
    char *pr_name = bprintf("WRK%02d", index);

    XBT_INFO("put a process (%s) on %s", pr_name, vm_name);
    MSG_process_create(pr_name, worker_fun, NULL, vm);

    xbt_free(vm_name);
    xbt_free(pr_name);
  }
Example #26
static void rule_route_extended_free(void *e)
{
  rule_route_extended_t *elem = (rule_route_extended_t *) e;
  if (*elem) {
    xbt_dynar_free(&(*elem)->generic_rule_route.re_str_link);
    pcre_free((*elem)->generic_rule_route.re_src);
    pcre_free((*elem)->generic_rule_route.re_dst);
    xbt_free((*elem)->re_src_gateway);
    xbt_free((*elem)->re_dst_gateway);
    xbt_free(*elem);
  }
}
Example #27
int main(int argc, char **argv)
{
  int i, size, rank;
  int count = 2048;
  int *values;
  int status;

  MPI_Init(&argc, &argv);
  MPI_Comm_size(MPI_COMM_WORLD, &size);
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  
  values = (int *) xbt_malloc(count * sizeof(int));  

  for (i = 0; i < count; i++)
    values[i] = (0 == rank) ? 17 : 3;

  MPI_Bcast(values, count, MPI_INT, 0, MPI_COMM_WORLD);

  int good = 0;
  for (i = 0; i < count; i++)
    if (values[i]==17) good++;
  printf("[%d] number of values equals to 17: %d\n", rank, good);

  MPI_Barrier(MPI_COMM_WORLD);
  xbt_free(values);

  count = 4096;
  values = (int *) xbt_malloc(count * sizeof(int));  

  for (i = 0; i < count; i++)
    values[i] = (size -1 == rank) ? 17 : 3;

  status = MPI_Bcast(values, count, MPI_INT, size-1, MPI_COMM_WORLD);

  good = 0;
  for (i = 0; i < count; i++)
    if (values[i]==17) good++;
  printf("[%d] number of values equals to 17: %d\n", rank, good);

  if (rank == 0) {
    if (status != MPI_SUCCESS) {
      printf("bcast returned %d\n", status);
      fflush(stdout);
    }
  }
  xbt_free(values);
  MPI_Finalize();
  return 0;
}
Example #28
/** \ingroup m_process_management
 * \brief Creates and runs a new #msg_process_t.

 * A constructor for #msg_process_t taking four arguments and returning the corresponding object. The structure (and
 * the corresponding thread) is created, and put in the list of ready process.
 * \param name a name for the object. It is for user-level information and can be nullptr.
 * \param code is a function describing the behavior of the process. It should then only use functions described
 * in \ref m_process_management (to create a new #msg_process_t for example),
   in \ref m_host_management (only the read-only functions i.e. whose name contains the word get),
   in \ref m_task_management (to create or destroy some #msg_task_t for example) and
   in \ref msg_task_usage (to handle file transfers and task processing).
 * \param data a pointer to any data one may want to attach to the new object.  It is for user-level information and
 *        can be nullptr. It can be retrieved with the function \ref MSG_process_get_data.
 * \param host the location where the new process is executed.
 * \param argc first argument passed to \a code
 * \param argv second argument passed to \a code. WARNING, these strings are freed by the SimGrid kernel when the
 *             process exits, so they cannot be static nor shared between several processes.
 * \param properties a list of properties defined for this process
 * \see msg_process_t
 * \return The new corresponding object.
 */
msg_process_t MSG_process_create_with_environment(const char *name, xbt_main_func_t code, void *data, msg_host_t host,
                                                  int argc, char **argv, xbt_dict_t properties)
{
  std::function<void()> function;
  if (code)
    function = simgrid::xbt::wrapMain(code, argc, const_cast<const char*const*>(argv));
  msg_process_t res = MSG_process_create_with_environment(name,
    std::move(function), data, host, properties);
  for (int i = 0; i != argc; ++i)
    xbt_free(argv[i]);
  xbt_free(argv);
  return res;
}
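A hedged usage sketch (the worker body, process name, and argument are placeholders): the argv vector is heap-allocated because, as documented above, the SimGrid kernel frees those strings when the process exits.

static int worker(int argc, char *argv[])
{
  /* ... do the work described by argv[1] ... */
  return 0;
}

static void spawn_worker(msg_host_t host)
{
  char **args = xbt_new(char *, 3);
  args[0] = xbt_strdup("worker");
  args[1] = xbt_strdup("42");
  args[2] = NULL;   /* conventional NULL terminator */

  MSG_process_create_with_environment("worker", worker, NULL /*data*/, host,
                                      2 /*argc*/, args, NULL /*properties*/);
}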
Example #29
/** \ingroup msg_file_management
 * \brief Close the file
 *
 * \param fd is the file to close
 * \return 0 on success or 1 on error
 */
int MSG_file_close(msg_file_t fd)
{
  char *name;
  msg_file_priv_t priv = MSG_file_priv(fd);
  if (priv->data)
    xbt_free(priv->data);

  int res = simcall_file_close(priv->simdata->smx_file, MSG_host_self());
  name = bprintf("%s:%i:%s",MSG_host_get_name(MSG_host_self()),MSG_process_self_PID(),priv->fullpath);
  xbt_lib_unset(file_lib, name, MSG_FILE_LEVEL, 1);
  xbt_free(name);
  return res;
}
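A hedged open/read/close sketch around this function, assuming the two-argument MSG_file_open(fullpath, data) variant from the same SimGrid generation; the path is a placeholder that must exist in the simulated storage:

msg_file_t file = MSG_file_open("/scratch/data.bin", NULL /*user data*/);
MSG_file_read(file, 1024);   /* read 1 kiB from the simulated file */
MSG_file_close(file);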
Example #30
/** \brief Destructor
 * \param m the mallocator you want to destroy
 *
 * Destroy the mallocator and all its data. The function
 * free_f is called on each object in the mallocator.
 *
 * \see xbt_mallocator_new()
 */
void xbt_mallocator_free(xbt_mallocator_t m)
{

  int i;
  xbt_assert(m != NULL, "Invalid parameter");

  XBT_VERB("Frees mallocator %p (size:%d/%d)", m, m->current_size,
        m->max_size);
  for (i = 0; i < m->current_size; i++) {
    m->free_f(m->objects[i]);
  }
  xbt_free(m->objects);
  xbt_free(m);
}
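A hedged end-to-end sketch of the mallocator lifecycle that this destructor finishes; my_obj_new/my_obj_free/my_obj_reset are hypothetical callbacks written here only to show the expected signatures.

static void *my_obj_new(void)      { return xbt_malloc(1024); }  /* build a fresh object */
static void  my_obj_free(void *o)  { xbt_free(o); }              /* called by xbt_mallocator_free */
static void  my_obj_reset(void *o) { /* nothing to reset in this toy object */ }

xbt_mallocator_t pool = xbt_mallocator_new(64, my_obj_new, my_obj_free, my_obj_reset);

void *obj = xbt_mallocator_get(pool);   /* recycled if available, freshly built otherwise */
/* ... use obj ... */
xbt_mallocator_release(pool, obj);      /* hand it back to the pool */

xbt_mallocator_free(pool);              /* my_obj_free() runs on every object still pooled */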