/** \brief Retrieve the data stored under the key set contained in \e keys, providing their sizes in \e lens.
 *
 * \arg mdict: the multi-dict
 * \arg keys: dynar of (char *) containing all the keys
 * \arg lens: length of each element of \e keys
 *
 * \return the data stored at the deepest level; a lookup failure at any level
 *         is propagated by xbt_dict_get_ext (mismatch exception).
 *
 * Dynars are not modified during the operation.
 */
void *xbt_multidict_get_ext(xbt_dict_t mdict, xbt_dynar_t keys, xbt_dynar_t lens)
{
  xbt_dict_t thislevel, nextlevel;
  int i;
  unsigned long int thislen;
  char *thiskey;
  int keys_len = xbt_dynar_length(keys);

  xbt_assert(xbt_dynar_length(keys) == xbt_dynar_length(lens));
  xbt_assert(!xbt_dynar_is_empty(keys), "Can't get a zero-long key set in a multidict");

  /* xbt_dynar_length returns unsigned long: %lu (was %ld) */
  XBT_DEBUG("xbt_multidict_get(%p, %lu)", mdict, xbt_dynar_length(keys));

  /* Walk down the nested dicts for all keys but the last one. */
  for (i = 0, thislevel = mdict; i < keys_len - 1; i++, thislevel = nextlevel) {
    xbt_dynar_get_cpy(keys, i, &thiskey);
    xbt_dynar_get_cpy(lens, i, &thislen);

    XBT_DEBUG("multi_get: at level %d (%p), len=%lu, key=%p |%*s|",
              i, thislevel, thislen, thiskey, (int) thislen, thiskey);

    /* search the dict of next level: let mismatch raise if not found */
    nextlevel = xbt_dict_get_ext(thislevel, thiskey, thislen);
  }

  /* Fetch the payload stored under the last key at the deepest level. */
  xbt_dynar_get_cpy(keys, i, &thiskey);
  xbt_dynar_get_cpy(lens, i, &thislen);

  return xbt_dict_get_ext(thislevel, thiskey, thislen);
}
/** @brief shows some debugging info about the bucklet repartition */ void xbt_dict_dump_sizes(xbt_dict_t dict) { int i; unsigned int count; unsigned int size; xbt_dictelm_t element; xbt_dynar_t sizes = xbt_dynar_new(sizeof(int), NULL); printf("Dict %p: %d bucklets, %d used cells (of %d) ", dict, dict->count, dict->fill, dict->table_size); if (dict != NULL) { for (i = 0; i < dict->table_size; i++) { element = dict->table[i]; size = 0; if (element) { while (element != NULL) { size++; element = element->next; } } if (xbt_dynar_length(sizes) <= size) { int prevsize = 1; xbt_dynar_set(sizes, size, &prevsize); } else { int prevsize; xbt_dynar_get_cpy(sizes, size, &prevsize); prevsize++; xbt_dynar_set(sizes, size, &prevsize); } } if (!all_sizes) all_sizes = xbt_dynar_new(sizeof(int), NULL); xbt_dynar_foreach(sizes, count, size) { /* Copy values of this one into all_sizes */ int prevcount; if (xbt_dynar_length(all_sizes) <= count) { prevcount = size; xbt_dynar_set(all_sizes, count, &prevcount); } else { xbt_dynar_get_cpy(all_sizes, count, &prevcount); prevcount += size; xbt_dynar_set(all_sizes, count, &prevcount); } /* Report current sizes */ if (count == 0) continue; if (size == 0) continue; printf("%uelm x %u cells; ", count, size); } }
/*
 * Estimate the time at which all the input data of a task are available (i.e.,
 * have been transfered to) on its current allocation. This estimation
 * corresponds to the maximum value among the compute parents of the task of
 * the sum of the estimated finish time of the parent and estimated transfer
 * time of the data sent by this parent. For control dependencies, the second
 * part is obviously discarded.
 */
double SD_task_estimate_last_data_arrival_time (SD_task_t task){
  unsigned int idx;
  double latest = -1.;
  SD_task_t pred;
  xbt_dynar_t preds = SD_task_get_parents(task);

  xbt_dynar_foreach(preds, idx, pred){
    double ready_at;
    if (SD_task_get_kind(pred) == SD_TASK_COMM_PAR_MXN_1D_BLOCK) {
      /* Flow dependency: the data comes from the compute grand parent through
       * this transfer task. Account for both its finish time and the transfer. */
      SD_task_t producer;
      xbt_dynar_t producers = SD_task_get_parents(pred);
      xbt_dynar_get_cpy(producers, 0, &producer);
      ready_at = SD_task_get_estimated_finish_time(producer) +
                 SD_task_estimate_transfer_time_from(producer, task, SD_task_get_amount(pred));
      xbt_dynar_free_container(&producers);
    } else {
      /* Control dependency: only the predecessor's finish time matters. */
      ready_at = SD_task_get_estimated_finish_time(pred);
    }
    if (ready_at > latest)
      latest = ready_at;
  }
  xbt_dynar_free_container(&preds);
  return latest;
}
/* Build the set of hosts/VMs that have to be terminated. This function selects how_many VMs from the source set of
 * candidates.
 * Remark: In the paper by Malawski et al., no details are provided about how the VMs are selected in the source set.
 * Moreover, there is no check w.r.t. the size of the source set.
 * Assumptions:
 * 1) If the source set is too small, display a warning and return a smaller set than expected.
 * 2) Straightforward selection of the VMs in the set. Just pick the how_many first ones without any consideration of
 *    the time remaining until the next hourly billing cycle. Just check if the VM is not currently executing a task
 */
xbt_dynar_t find_active_VMs_to_stop(int how_many, xbt_dynar_t source){
  int i, found;
  long unsigned int source_size = xbt_dynar_length(source);
  xbt_dynar_t to_stop = xbt_dynar_new(sizeof(sg_host_t), NULL);
  sg_host_t v;

  /* Clamp the request. The explicit how_many < 0 test avoids the implicit
   * signed->unsigned conversion of the original comparison, which made a
   * negative how_many wrap to a huge value; in both cases the request is
   * reduced to the source set size, as before. */
  if (how_many < 0 || (long unsigned int) how_many > source_size){
    XBT_WARN("Trying to terminate more VMs than what is available (%d > %lu)."
             " Change the number of VMs to terminate to %lu", how_many, source_size, source_size);
    how_many = (int) source_size;
  }

  i = 0;
  found = 0;
  /* Use the cached source_size instead of recomputing the length on every
   * iteration (the dynar is not modified in this loop). */
  while (found < how_many && (long unsigned int) i < source_size){
    /* No advanced selection process. Just pick the how_many first idle VMs in the source set. */
    xbt_dynar_get_cpy(source, i, &v);
    HostAttribute attr = sg_host_user(v);
    if (!attr->idle_busy){
      xbt_dynar_push(to_stop, &v);
      found++;
    }
    i++;
  }

  if (found < how_many)
    XBT_WARN("Trying to terminate too many VMs, some are busy... Change the number of VMs to terminate to %d", found);

  return to_stop;
}
/* This function is actually not used by biCPA */
/* Recursively compute (and cache via SD_task_set_top_level) the top level of
 * 'task': the maximum over its compute ancestors of the ancestor's top level
 * plus its estimated execution time. The dummy 'root' task has top level 0. */
double top_level_recursive_computation(SD_task_t task){
  unsigned int idx;
  double best = -1.0;
  SD_task_t pred;
  xbt_dynar_t preds;

  if (!strcmp(SD_task_get_name(task),"root")){
    XBT_DEBUG("root's top level is 0.0");
    SD_task_mark(task);
    SD_task_set_top_level(task, 0.0);
    return 0.0;
  }

  preds = SD_task_get_parents(task);
  xbt_dynar_foreach(preds, idx, pred){
    SD_task_t ancestor;
    double candidate;

    if (SD_task_get_kind(pred) == SD_TASK_COMM_PAR_MXN_1D_BLOCK) {
      /* Flow dependency: the compute ancestor is the grand parent. */
      xbt_dynar_t sources = SD_task_get_parents(pred);
      xbt_dynar_get_cpy(sources, 0, &ancestor);
      xbt_dynar_free_container(&sources);
    } else {
      /* Control dependency: the ancestor is the parent itself. */
      ancestor = pred;
    }

    /* Reuse the cached value for already-visited ancestors, recurse otherwise. */
    if (SD_task_is_marked(ancestor))
      candidate = SD_task_get_top_level(ancestor);
    else
      candidate = top_level_recursive_computation(ancestor);
    candidate += SD_task_estimate_execution_time(ancestor, SD_task_get_allocation_size(ancestor));

    if (candidate > best)
      best = candidate;
  }

  SD_task_set_top_level(task, best);
  SD_task_mark(task);
  XBT_DEBUG("%s's top level is %f", SD_task_get_name(task), best);
  xbt_dynar_free_container(&preds);
  return best;
}
/** \brief Insert \e data under all the keys contained in \e keys, providing their sizes in \e lens.
 *
 * \arg mdict: the multi-dict
 * \arg keys: dynar of (char *) containing all the keys
 * \arg lens: length of each element of \e keys
 * \arg data: the data to store in the structure
 * \arg free_ctn: function to use to free the pushed content on need
 *
 * Dynars are not modified during the operation.
 */
void xbt_multidict_set_ext(xbt_dict_t mdict, xbt_dynar_t keys, xbt_dynar_t lens, void *data, void_f_pvoid_t free_ctn)
{
  xbt_dict_t thislevel, nextlevel = NULL;
  int i;
  unsigned long int thislen;
  char *thiskey;
  int keys_len = xbt_dynar_length(keys);

  xbt_assert(xbt_dynar_length(keys) == xbt_dynar_length(lens));
  xbt_assert(keys_len, "Can't set a zero-long key set in a multidict");

  XBT_DEBUG("xbt_multidict_set(%p,%d)", mdict, keys_len);

  /* Walk down the nested dicts for all keys but the last one, creating
   * intermediate levels on demand. */
  for (i = 0, thislevel = mdict; i < keys_len - 1; i++, thislevel = nextlevel) {
    xbt_dynar_get_cpy(keys, i, &thiskey);
    xbt_dynar_get_cpy(lens, i, &thislen);

    /* thislen is unsigned long: %lu (was %ld, a signed/unsigned mismatch) */
    XBT_DEBUG("multi_set: at level %d, len=%lu, key=%p |%*s|", i, thislen, thiskey, (int) thislen, thiskey);

    /* search the dict of next level */
    nextlevel = xbt_dict_get_or_null_ext(thislevel, thiskey, thislen);
    if (nextlevel == NULL) {
      /* make sure the dict of next level exists */
      nextlevel = xbt_dict_new();
      XBT_VERB("Create a dict (%p)", nextlevel);
      xbt_dict_set_ext(thislevel, thiskey, thislen, nextlevel, &_free_dict);
    }
  }

  /* Store the payload under the last key at the deepest level. */
  xbt_dynar_get_cpy(keys, i, &thiskey);
  xbt_dynar_get_cpy(lens, i, &thislen);

  xbt_dict_set_ext(thislevel, thiskey, thislen, data, free_ctn);
}
/** \brief Remove the entry stored under the key set contained in \e keys (sizes in \e lens) from \e mdict.
 *
 * Walks the nested dicts level by level; a missing intermediate level
 * (arg_error exception) is silently ignored, any other exception is rethrown.
 * Dynars are not modified during the operation.
 */
void xbt_multidict_remove_ext(xbt_dict_t mdict, xbt_dynar_t keys, xbt_dynar_t lens)
{
  /* Locals live across TRY/CATCH (setjmp/longjmp), hence the volatile
   * qualifiers: values modified between setjmp and longjmp must be volatile
   * to be reliably readable after the jump. */
  volatile xbt_dict_t thislevel;
  volatile xbt_dict_t nextlevel = NULL;
  volatile int i;
  xbt_ex_t e;
  unsigned long int thislen;
  char *thiskey;
  int keys_len = xbt_dynar_length(keys);

  xbt_assert(xbt_dynar_length(keys) == xbt_dynar_length(lens));
  xbt_assert(xbt_dynar_length(keys), "Can't remove a zero-long key set in a multidict");

  /* Walk down the nested dicts for all keys but the last one. */
  for (i = 0, thislevel = mdict; i < keys_len - 1; i++, thislevel = nextlevel) {
    xbt_dynar_get_cpy(keys, i, &thiskey);
    xbt_dynar_get_cpy(lens, i, &thislen);

    /* search the dict of next level */
    TRY {
      nextlevel = xbt_dict_get_ext(thislevel, thiskey, thislen);
    } CATCH(e) {
      /* If non-existant entry, nothing to do */
      if (e.category == arg_error)
        xbt_ex_free(e);
      else
        RETHROW;
    }
    /* NOTE(review): when the lookup fails, nextlevel keeps its previous value
     * (NULL on the first iteration) and the loop still advances with
     * thislevel = nextlevel, so the next xbt_dict_get_ext would operate on a
     * NULL dict. Presumably callers only remove keys whose path exists —
     * confirm before relying on partial-path removal. */
  }

  /* Remove the leaf entry stored under the last key. */
  xbt_dynar_get_cpy(keys, i, &thiskey);
  xbt_dynar_get_cpy(lens, i, &thislen);

  xbt_dict_remove_ext(thislevel, thiskey, thislen);
}
/* Recursively compute (and cache via SD_task_set_bottom_level) the bottom
 * level of 'task': its own estimated execution time plus the largest bottom
 * level among its compute successors. The dummy 'end' task has bottom level 0. */
double bottom_level_recursive_computation(SD_task_t task){
  unsigned int idx;
  double level, deepest = -1.0;
  SD_task_t succ;
  xbt_dynar_t succs;

  level = SD_task_estimate_execution_time(task, SD_task_get_allocation_size(task));

  if (!strcmp(SD_task_get_name(task),"end")){
    XBT_DEBUG("end's bottom level is 0.0");
    SD_task_mark(task);
    SD_task_set_bottom_level(task, 0.0);
    return 0.0;
  }

  succs = SD_task_get_children(task);
  xbt_dynar_foreach(succs, idx, succ){
    SD_task_t target;
    double succ_level;

    if (SD_task_get_kind(succ) == SD_TASK_COMM_PAR_MXN_1D_BLOCK) {
      /* Flow dependency: the compute successor is the grand child. */
      xbt_dynar_t sinks = SD_task_get_children(succ);
      xbt_dynar_get_cpy(sinks, 0, &target);
      xbt_dynar_free_container(&sinks);
    } else {
      /* Control dependency: the successor is the child itself. */
      target = succ;
    }

    /* Reuse the cached value for already-visited successors, recurse otherwise. */
    succ_level = SD_task_is_marked(target) ? SD_task_get_bottom_level(target)
                                           : bottom_level_recursive_computation(target);
    if (succ_level > deepest)
      deepest = succ_level;
  }

  level += deepest;
  SD_task_set_bottom_level(task, level);
  SD_task_mark(task);
  XBT_DEBUG("%s's bottom level is %f", SD_task_get_name(task), level);
  xbt_dynar_free_container(&succs);
  return level;
}
/* This function is actually not used by biCPA */
/* Recursively compute (and cache via SD_task_set_precedence_level) the
 * precedence level of 'task': one more than the maximum precedence level
 * among its compute ancestors. The dummy 'root' task has level 0. */
int precedence_level_recursive_computation(SD_task_t task){
  unsigned int idx;
  int level = -1;
  SD_task_t pred;
  xbt_dynar_t preds;

  if (!strcmp(SD_task_get_name(task),"root")){
    XBT_DEBUG("root's precedence level is 0.0");
    SD_task_mark(task);
    SD_task_set_precedence_level(task, 0);
    return 0;
  }

  preds = SD_task_get_parents(task);
  xbt_dynar_foreach(preds, idx, pred){
    SD_task_t ancestor = pred;
    int candidate;

    if (SD_task_get_kind(pred) == SD_TASK_COMM_PAR_MXN_1D_BLOCK) {
      /* Flow dependency: the compute ancestor is the grand parent. */
      xbt_dynar_t sources = SD_task_get_parents(pred);
      xbt_dynar_get_cpy(sources, 0, &ancestor);
      xbt_dynar_free_container(&sources);
    }

    /* Reuse the cached value for already-visited ancestors, recurse otherwise. */
    candidate = (SD_task_is_marked(ancestor) ? SD_task_get_precedence_level(ancestor)
                                             : precedence_level_recursive_computation(ancestor)) + 1;
    if (candidate > level)
      level = candidate;
  }

  SD_task_set_precedence_level(task, level);
  SD_task_mark(task);
  XBT_DEBUG("%s's precedence level is %d", SD_task_get_name(task), level);
  xbt_dynar_free_container(&preds);
  return level;
}
static double finish_on_at(SD_task_t task, sg_host_t host) { double result; unsigned int i; double data_available = 0.; double redist_time = 0; double last_data_available; xbt_dynar_t parents = SD_task_get_parents(task); if (!xbt_dynar_is_empty(parents)) { /* compute last_data_available */ SD_task_t parent; last_data_available = -1.0; xbt_dynar_foreach(parents, i, parent) { /* normal case */ if (SD_task_get_kind(parent) == SD_TASK_COMM_E2E) { xbt_dynar_t grand_parents = SD_task_get_parents(parent); SD_task_t grand_parent; xbt_assert(xbt_dynar_length(grand_parents) <2, "Error: transfer %s has 2 parents", SD_task_get_name(parent)); xbt_dynar_get_cpy(grand_parents, 0, &grand_parent); sg_host_t * grand_parent_host_list = SD_task_get_workstation_list(grand_parent); /* Estimate the redistribution time from this parent */ if (SD_task_get_amount(parent) <= 1e-6){ redist_time= 0; } else { redist_time = SD_route_get_latency(grand_parent_host_list[0], host) + SD_task_get_amount(parent) / SD_route_get_bandwidth(grand_parent_host_list[0], host); } data_available = SD_task_get_finish_time(grand_parent) + redist_time; xbt_dynar_free_container(&grand_parents); } /* no transfer, control dependency */ if (SD_task_get_kind(parent) == SD_TASK_COMP_SEQ) { data_available = SD_task_get_finish_time(parent); } if (last_data_available < data_available) last_data_available = data_available; } xbt_dynar_free_container(&parents); result = MAX(sg_host_get_available_at(host), last_data_available) + SD_task_get_amount(task)/sg_host_speed(host); } else {
/* Build the set of the compute successors of a task that are ready (i.e., with all parents already scheduled). Both
 * data and control dependencies are checked. As more than one transfer may exist between two compute tasks, it is
 * mandatory to check whether the successor is not already in the set. */
xbt_dynar_t SD_task_get_ready_children(SD_task_t t){
  unsigned int idx;
  SD_task_t succ;
  xbt_dynar_t succs = SD_task_get_children(t);
  xbt_dynar_t result = xbt_dynar_new(sizeof(SD_task_t), NULL);

  xbt_dynar_foreach(succs, idx, succ){
    SD_task_t candidate = succ;
    xbt_dynar_t via = NULL;

    if (SD_task_get_kind(succ) == SD_TASK_COMM_E2E) {
      /* Data dependency: the scheduling child is the grand child, reached
       * through this end-to-end transfer task. */
      via = SD_task_get_children(succ);
      xbt_dynar_get_cpy(via, 0, &candidate);
    }
    /* Otherwise (control dependency) the candidate is the successor itself. */

    /* Skip candidates already collected (several transfers may lead to the
     * same compute task). */
    if (xbt_dynar_member(result, &candidate)){
      XBT_DEBUG("%s already seen, ignore", SD_task_get_name(candidate));
      if (via)
        xbt_dynar_free_container(&via); /* avoid memory leaks */
      continue;
    }

    if (SD_task_get_kind(candidate) == SD_TASK_COMP_SEQ &&
        (SD_task_get_state(candidate) == SD_NOT_SCHEDULED ||
         SD_task_get_state(candidate) == SD_SCHEDULABLE) &&
        SD_task_is_ready(candidate)){
      xbt_dynar_push(result, &candidate);
    }

    if (via)
      xbt_dynar_free_container(&via); /* avoid memory leaks */
  }

  xbt_dynar_free_container(&succs); /* avoid memory leaks */
  return result;
}
static xbt_dynar_t rulebased_get_onelink_routes(AS_t rc) { xbt_dynar_t ret = xbt_dynar_new (sizeof(onelink_t), xbt_free); //We have already bypass cluster routes with network NS3 if(!strcmp(surf_network_model->name,"network NS3")) return ret; char *k1; //find router sg_routing_edge_t router = NULL; xbt_lib_cursor_t cursor; xbt_lib_foreach(as_router_lib, cursor, k1, router) { if (router->rc_type == SURF_NETWORK_ELEMENT_ROUTER) break; } if (!router) xbt_die ("rulebased_get_onelink_routes works only if the AS is a cluster, sorry."); sg_routing_edge_t host = NULL; xbt_lib_foreach(as_router_lib, cursor, k1, host){ void *link_ptr; sg_platf_route_cbarg_t route = xbt_new0(s_sg_platf_route_cbarg_t,1); route->link_list = xbt_dynar_new(sizeof(sg_routing_link_t),NULL); rulebased_get_route_and_latency (rc, router, host, route,NULL); switch (xbt_dynar_length(route->link_list)) { case 1: //loopback break; case 2: xbt_dynar_get_cpy (route->link_list, 1, &link_ptr); onelink_t onelink = xbt_new0 (s_onelink_t, 1); onelink->src = host; onelink->dst = router; onelink->link_ptr = link_ptr; xbt_dynar_push (ret, &onelink); break; default: xbt_die("rulebased_get_onelink_routes works only if the AS is a cluster, sorry."); break; } }
/*
 * Return an estimation of the minimal time before which a task can start. This
 * time depends on the estimated finished time of the compute ancestors of the
 * task, as set when they have been scheduled. Two cases are considered,
 * depending on whether an ancestor is 'linked' to the task through a flow or
 * control dependency. Flow dependencies imply to look at the grand parents of
 * the task, while control dependencies look at the parent tasks directly.
 */
double SD_task_estimate_minimal_start_time(SD_task_t task){
  unsigned int idx;
  double earliest = 0.0;
  SD_task_t pred, producer;
  xbt_dynar_t preds;

  preds = SD_task_get_parents(task);
  xbt_dynar_foreach(preds, idx, pred){
    double finish;
    if (SD_task_get_kind(pred) == SD_TASK_COMM_PAR_MXN_1D_BLOCK) {
      /* Flow dependency: look at the compute grand parent behind the transfer. */
      xbt_dynar_t producers = SD_task_get_parents(pred);
      xbt_dynar_get_cpy(producers, 0, &producer);
      finish = SD_task_get_estimated_finish_time(producer);
      xbt_dynar_free_container(&producers);
    } else {
      /* Control dependency: look at the parent directly. */
      finish = SD_task_get_estimated_finish_time(pred);
    }
    if (finish > earliest)
      earliest = finish;
  }
  xbt_dynar_free_container(&preds);
  return earliest;
}
/* Determine if a task is ready. The condition to meet is that all its compute predecessors have to be in one of the
 * following state:
 *  - SD_RUNNABLE
 *  - SD_RUNNING
 *  - SD_DONE
 */
int SD_task_is_ready(SD_task_t task){
  unsigned int idx;
  int ready = 1;
  SD_task_t pred;
  xbt_dynar_t preds = SD_task_get_parents(task);

  /* Iterating over an empty parent dynar is a no-op, so no explicit
   * length check is needed before the loop. */
  xbt_dynar_foreach(preds, idx, pred){
    SD_task_t effective = pred;
    xbt_dynar_t gp = NULL;
    int blocked;

    if (SD_task_get_kind(pred) == SD_TASK_COMM_E2E) {
      /* Data dependency: the scheduling parent is the grand parent, reached
       * through this end-to-end transfer task. */
      gp = SD_task_get_parents(pred);
      xbt_dynar_get_cpy(gp, 0, &effective);
    }
    /* Otherwise (control dependency) the predecessor is the parent itself. */

    blocked = (SD_task_get_state(effective) < SD_SCHEDULED);
    if (gp)
      xbt_dynar_free_container(&gp); /* avoid memory leaks */
    if (blocked) {
      ready = 0;
      break;
    }
  }

  xbt_dynar_free_container(&preds); /* avoid memory leaks */
  return ready;
}
/*
 * Get the dummy 'end' task of a DAG, i.e., the last task of the dynar.
 * Asserts that the DAG is not empty: on an empty dynar, length-1 would wrap
 * around (unsigned underflow) and index far out of range.
 */
SD_task_t get_dag_end(xbt_dynar_t dag){
  SD_task_t task;
  xbt_assert(!xbt_dynar_is_empty(dag), "Cannot get the 'end' task of an empty DAG");
  xbt_dynar_get_cpy(dag, xbt_dynar_length(dag)-1, &task);
  return task;
}
/*
 * Get the dummy 'root' task of a DAG, i.e., the first task of the dynar.
 * Asserts that the DAG is not empty so the failure mode is a clear message
 * rather than an out-of-range dynar access.
 */
SD_task_t get_dag_root(xbt_dynar_t dag){
  SD_task_t task;
  xbt_assert(!xbt_dynar_is_empty(dag), "Cannot get the 'root' task of an empty DAG");
  xbt_dynar_get_cpy(dag, 0, &task);
  return task;
}