Example no. 1
// Recursively collect the address of every child-pointer slot in the
// tree rooted at t; only internal nodes (if_f, proc) have children.
void Gene::get_node_list(gene_t_p t){
  if(t->mov==if_f||t->mov==proc){
    node_list.push_back(&(t->lt));  // store the slots themselves, so a
    node_list.push_back(&(t->rt));  // caller can overwrite a whole subtree
    get_node_list(t->lt);
    get_node_list(t->rt);
  }
}
Example no. 2
// Rebuild node_list from the root, then return a pointer to a uniformly
// random node slot (the root slot itself is a candidate).
gene_t_p * Gene::get_random_node(){
  node_list.clear();
  node_list.push_back(&t_tr);
  get_node_list(t_tr);
  mt19937 mt(rd());  // note: seeds a fresh engine on every call
  uniform_int_distribution<int> rd_int(0, (int)node_list.size() - 1);
  return node_list[rd_int(mt)];
}
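
Examples 1 and 2 use members and types that the snippets themselves do not
declare. The following is a minimal sketch of the assumed surrounding class,
just enough to make both methods compile: the member names (t_tr, node_list,
rd) are taken from the snippets, while mov_t and gene_node are hypothetical
names chosen only for illustration.

#include <random>
#include <vector>
using namespace std;

enum mov_t { if_f, proc, term };     // hypothetical node kinds; if_f and
                                     // proc are the only internal nodes
struct gene_node {
  mov_t mov;
  gene_node *lt, *rt;                // children, used only for if_f/proc
};
typedef gene_node *gene_t_p;

class Gene {
  gene_t_p t_tr;                     // root of the expression tree
  vector<gene_t_p *> node_list;      // addresses of pointer slots
  random_device rd;
public:
  void get_node_list(gene_t_p t);
  gene_t_p *get_random_node();
};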
Example no. 3
DVector<String> AnimationTreePlayer::_get_node_list() {

	List<StringName> nl;
	get_node_list(&nl);

	// Copy the StringName list into a DVector<String> so it can be
	// returned by value to the caller.
	DVector<String> ret;
	ret.resize(nl.size());
	int idx=0;
	for(List<StringName>::Element *E=nl.front();E;E=E->next()) {
		ret.set(idx++,E->get());
	}

	return ret;
}
Example no. 4
void Shader::_get_property_list( List<PropertyInfo> *p_list) const {

	// Expose each graph node as a stored dictionary property.
	List<int> nodes;
	get_node_list(&nodes);

	for(List<int>::Element *E=nodes.front();E;E=E->next()) {

		int idx=E->get();
		p_list->push_back(PropertyInfo( Variant::DICTIONARY , "nodes/"+itos(idx),PROPERTY_HINT_NONE,"",PROPERTY_USAGE_NETWORK|PROPERTY_USAGE_STORAGE ) );
	}

	// Likewise for the connections between nodes.
	List<Connection> connections;
	get_connections(&connections);
	int idx=0;
	for(List<Connection>::Element *E=connections.front();E;E=E->next()) {
		p_list->push_back(PropertyInfo( Variant::DICTIONARY , "conns/"+itos(idx++),PROPERTY_HINT_NONE,"",PROPERTY_USAGE_NETWORK|PROPERTY_USAGE_STORAGE ) );
	}

}
Example no. 5
int
gtm_standby_restore_node(void)
{
	GTM_PGXCNodeInfo *data;
	int rc, i;
	int num_node;

	elog(LOG, "Copying node information from the GTM active...");

	/* room for up to 128 node entries */
	data = (GTM_PGXCNodeInfo *) malloc(sizeof(GTM_PGXCNodeInfo) * 128);
	if (data == NULL)
		return 0;
	memset(data, 0, sizeof(GTM_PGXCNodeInfo) * 128);

	rc = get_node_list(GTM_ActiveConn, data, 128);
	if (rc < 0)
	{
		elog(DEBUG3, "get_node_list() failed.");
		rc = 0;
		goto finished;
	}

	num_node = rc;	/* on success the return value is the entry count */

	for (i = 0; i < num_node; i++)
	{
		elog(LOG, "get_node_list: nodetype=%d, nodename=%s, datafolder=%s",
			 data[i].type, data[i].nodename, data[i].datafolder);
		if (Recovery_PGXCNodeRegister(data[i].type, data[i].nodename, data[i].port,
					 data[i].proxyname, data[i].status,
					 data[i].ipaddress, data[i].datafolder, true,
					 -1 /* dummy socket */) != 0)
		{
			rc = 0;
			goto finished;
		}
	}

	elog(LOG, "Copying node information from GTM active done.");

finished:
	free(data);
	return rc;
}
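
The snippet touches only a handful of GTM_PGXCNodeInfo fields. As a sketch,
the struct assumed above would look roughly like this; the field names come
from the calls in the snippet, but the types are guesses and the real
definition lives in the GTM headers.

typedef struct GTM_PGXCNodeInfo
{
	int	 type;		/* node type, printed with %d above */
	char	*nodename;
	int	 port;
	char	*proxyname;
	int	 status;
	char	*ipaddress;
	char	*datafolder;
} GTM_PGXCNodeInfo;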
Example no. 6
#include <stdio.h>	/* plus the linked-list library's own header */

int main(int argc, char const *argv[]) {
	struct LinkList* list = create_list();

	/* build an initial list and print it */
	append_list(list, 1);
	insert_list(list, 2, 0);
	push_list(list, 10);
	show_list(list);

	int data = pop_list(list);
	printf("pop data is %d\n", data);
	show_list(list);

	/* push some duplicates to exercise the removal helpers */
	push_list(list, 10);
	push_list(list, 11);
	push_list(list, 10);
	push_list(list, 11);
	show_list(list);

	remove_list(list, 1);		/* remove by index */
	show_list(list);

	remove_data_list(list, 10);	/* remove the first node holding 10 */
	show_list(list);

	remove_all_data_list(list, 11);	/* remove every node holding 11 */
	show_list(list);

	struct Node* node = get_node_list(list, 0);	/* assumes index 0 exists */
	printf("data is %d\n", node->data);

	int index = search_list(list, 2);
	printf("index is %d\n", index);

	destory_list(list);		/* spelling as defined by the library */
	return 0;
}
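
The demo above leans on a small singly linked list library whose definitions
are not shown. A hypothetical sketch of the two pieces the last calls depend
on (the node layout and get_node_list) follows; everything here is an
assumption made only so the walk-through is concrete.

struct Node {
	int data;
	struct Node *next;
};

struct LinkList {
	struct Node *head;
	int size;
};

/* Hypothetical: walk to the index-th node; returns a null pointer when
 * index is out of range. */
struct Node *get_node_list(struct LinkList *list, int index) {
	struct Node *n = list->head;
	while (n && index-- > 0)
		n = n->next;
	return n;
}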
Example no. 7
/* we cannot use the RML to communicate with SLURM as it doesn't
 * understand our internal protocol, so we have to do a bare-bones
 * exchange based on sockets
 */
static int dyn_allocate(orte_job_t *jdata)
{
    char *cmd_str, **cmd=NULL, *tmp, *jstring;
    char *node_list;
    orte_app_context_t *app;
    int i;
    struct timeval tv;
    local_jobtracker_t *jtrk;
    int64_t i64, *i64ptr;

    if (NULL == mca_ras_slurm_component.config_file) {
        opal_output(0, "Cannot perform dynamic allocation as no Slurm configuration file provided");
        return ORTE_ERR_NOT_FOUND;
    }

    /* track this request */
    jtrk = OBJ_NEW(local_jobtracker_t);
    jtrk->jobid = jdata->jobid;
    opal_list_append(&jobs, &jtrk->super);

    /* construct the command - note that the jdata structure contains
     * a field for the minimum number of nodes required for the job.
     * The node list can be constructed from the union of all the nodes
     * contained in the dash_host field of the app_contexts. So you'll
     * need to do a little work to build the command. We don't currently
     * have a field in the jdata structure for "mandatory" vs "optional"
     * allocations, so we'll have to add that someday. Likewise, you may
     * want to provide a param to adjust the timeout value
     */
    /* construct the cmd string */
    opal_argv_append_nosize(&cmd, "allocate");
    /* add the jobid */
    orte_util_convert_jobid_to_string(&jstring, jdata->jobid);
    asprintf(&tmp, "jobid=%s", jstring);
    opal_argv_append_nosize(&cmd, tmp);
    free(tmp);
    free(jstring);
    /* if we want the allocation for all apps in one shot,
     * then tell slurm
     *
     * RHC: we don't currently have the ability to handle
     * rolling allocations in the rest of the code base
     */
#if 0
    if (!mca_ras_slurm_component.rolling_alloc) {
        opal_argv_append_nosize(&cmd, "return=all");
    }
#else
    opal_argv_append_nosize(&cmd, "return=all");
#endif

    /* pass the timeout */
    asprintf(&tmp, "timeout=%d", mca_ras_slurm_component.timeout);
    opal_argv_append_nosize(&cmd, tmp);
    free(tmp);

    /* for each app, add its allocation request info */
    i64ptr = &i64;
    for (i=0; i < jdata->apps->size; i++) {
        if (NULL == (app = (orte_app_context_t*)opal_pointer_array_get_item(jdata->apps, i))) {
            continue;
        }
        /* add the app id, preceded by a colon separator */
        asprintf(&tmp, ": app=%d", (int)app->idx);
        opal_argv_append_nosize(&cmd, tmp);
        free(tmp);
        /* add the number of process "slots" we need */
        asprintf(&tmp, "np=%d", app->num_procs);
        opal_argv_append_nosize(&cmd, tmp);
        free(tmp);
        /* if we were given a minimum number of nodes, pass it along */
        if (orte_get_attribute(&app->attributes, ORTE_APP_MIN_NODES, (void**)&i64ptr, OPAL_INT64)) {
            asprintf(&tmp, "N=%ld", (long int)i64);
            opal_argv_append_nosize(&cmd, tmp);
            free(tmp);
        }
        /* add the list of nodes, if one was given, ensuring
         * that each node only appears once
         */
        node_list =  get_node_list(app);
        if (NULL != node_list) {
            asprintf(&tmp, "node_list=%s", node_list);
            opal_argv_append_nosize(&cmd, tmp);
            free(node_list);
            free(tmp);
        }
        /* add the mandatory/optional flag */
        if (orte_get_attribute(&app->attributes, ORTE_APP_MANDATORY, NULL, OPAL_BOOL)) {
            opal_argv_append_nosize(&cmd, "flag=mandatory");
        } else {
            opal_argv_append_nosize(&cmd, "flag=optional");
        }
    }

    /* assemble it into the final cmd to be sent */
    cmd_str = opal_argv_join(cmd, ' ');
    opal_argv_free(cmd);

    /* start a timer - if the response to our request doesn't appear
     * in the defined time, then we will error out as Slurm isn't
     * responding to us
     */
    opal_event_evtimer_set(orte_event_base, &jtrk->timeout_ev, timeout, jtrk);
    tv.tv_sec = mca_ras_slurm_component.timeout * 2;
    tv.tv_usec = 0;
    opal_event_evtimer_add(&jtrk->timeout_ev, &tv);

    opal_output_verbose(2, orte_ras_base_framework.framework_output,
                        "%s slurm:dynalloc cmd_str = %s",
                        ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
                        cmd_str);

    if (send(socket_fd, cmd_str, strlen(cmd_str)+1, 0) < 0) {
        ORTE_ERROR_LOG(ORTE_ERR_COMM_FAILURE);
    }
    free(cmd_str);

    /* we cannot wait here for a response as we
     * are already in an event. So return a value
     * that indicates we are waiting for an
     * allocation so the base functions know
     * that they shouldn't progress the job
     */
    return ORTE_ERR_ALLOCATION_PENDING;
}
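
For reference, the request assembled above is a single space-separated
string: the fixed tokens first, then one colon-prefixed group per app
context. With one app it would come out shaped roughly like the line below;
every value is a placeholder, not output from a real run.

allocate jobid=<jobid-string> return=all timeout=<sec> : app=0 np=4 N=2 node_list=node01,node02 flag=optional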