/*
 * PNP callback invoked when the stop/terminate command is acknowledged.
 * Verifies the ack carries our STOP command, reports the job's final
 * status, and terminates the tool.
 *
 * BUG FIX: the original ignored both opal_dss.unpack return codes; on a
 * short/corrupt buffer it would read the uninitialized locals flag/rc.
 */
static void ack_recv(int status, orte_process_name_t *sender,
                     orcm_pnp_tag_t tag, struct iovec *msg, int count,
                     opal_buffer_t *buf, void *cbdata)
{
    int rc, ret, n;
    orcm_tool_cmd_t flag;

    /* unpack the cmd and verify it is us */
    n = 1;
    if (OPAL_SUCCESS != (ret = opal_dss.unpack(buf, &flag, &n, ORCM_TOOL_CMD_T))) {
        ORTE_ERROR_LOG(ret);
        return;
    }
    if (ORCM_TOOL_STOP_CMD != flag) {
        /* wrong cmd */
        opal_output(0, "GOT WRONG CMD");
        return;
    }

    /* unpack the result of the stop/term command */
    n = 1;
    if (OPAL_SUCCESS != (ret = opal_dss.unpack(buf, &rc, &n, OPAL_INT))) {
        ORTE_ERROR_LOG(ret);
        return;
    }
    ORTE_UPDATE_EXIT_STATUS(rc);

    if (0 == rc) {
        opal_output(orte_clean_output, "Job stopped");
    } else {
        opal_output(orte_clean_output, "Job failed to stop with error %s",
                    ORTE_ERROR_NAME(rc));
    }

    /* the fact we recvd this is enough */
    orcm_finalize();
    exit(0);
}
/*
 * Event callback fired when the release pipe becomes readable: removes
 * the event, closes both ends of the pipe, finalizes ORCM, and exits.
 * fd/flag/data are the standard libevent callback arguments (unused).
 */
static void process_release(int fd, short flag, void *data)
{
    /* delete our local event */
    opal_event_del(&rel_ev);
    close(rel_pipe[0]);
    close(rel_pipe[1]);
    orcm_finalize();
    exit(0);
}
int main(int argc, char* argv[]) { struct timespec tp; int rc, delay; target.jobid = ORTE_JOBID_INVALID; target.vpid = ORTE_VPID_INVALID; /* init the ORCM library - this includes registering * a multicast recv so we hear announcements and * their responses from other apps */ if (ORCM_SUCCESS != (rc = orcm_init(ORCM_APP))) { fprintf(stderr, "Failed to init: error %d\n", rc); exit(1); } opal_event_signal_set(opal_event_base, &sigterm_handler, SIGTERM, signal_trap, &sigterm_handler); opal_event_signal_add(&sigterm_handler, NULL); opal_event_signal_set(opal_event_base, &sigint_handler, SIGINT, signal_trap, &sigint_handler); opal_event_signal_add(&sigint_handler, NULL); /* for this application, register to hear messages sent to our input */ if (ORCM_SUCCESS != (rc = orcm_pnp.register_receive("client", "2.0", "beta", ORCM_PNP_GROUP_INPUT_CHANNEL, ORCM_TEST_CLIENT_SERVER_TAG, recv_input, NULL))) { ORTE_ERROR_LOG(rc); goto cleanup; } /* open a channel to any client 1.0 peers */ if (ORCM_SUCCESS != (rc = orcm_pnp.open_channel("client", "1.0", NULL, ORTE_JOBID_WILDCARD, found_channel))) { ORTE_ERROR_LOG(rc); goto cleanup; } /* announce our existence */ if (ORCM_SUCCESS != (rc = orcm_pnp.announce("CLIENT", "2.0", "beta", responses))) { ORTE_ERROR_LOG(rc); goto cleanup; } opal_output(0, "CLIENT2 %s ACTIVE", ORTE_NAME_PRINT(ORTE_PROC_MY_NAME)); /* init the msg number */ msg_num = 0; /* wake up every x seconds send something */ ORTE_TIMER_EVENT(ORTE_PROC_MY_NAME->vpid + 1, 0, send_data); opal_event_dispatch(opal_event_base); cleanup: orcm_finalize(); return rc; }
int main(int argc, char *argv[]) { int ret, exit_status = ORTE_SUCCESS; orte_ps_mpirun_info_t hnpinfo; /*************** * Initialize ***************/ if (ORTE_SUCCESS != (ret = orte_ps_init(argc, argv))) { exit_status = ret; goto cleanup; } /* gather info from the scheduler */ opal_output_verbose(10, orte_ps_globals.output, "orte_ps: Gathering Information"); OBJ_CONSTRUCT(&hnpinfo, orte_ps_mpirun_info_t); if (ORTE_SUCCESS == (ret = gather_information(&hnpinfo))) { /* Print the information */ if (orte_ps_globals.parseable) { if (ORTE_SUCCESS != (ret = parseable_print(&hnpinfo))) { exit_status = ret; } } else { if(ORTE_SUCCESS != (ret = pretty_print(&hnpinfo)) ) { exit_status = ret; } } } else { /* this could be due to a stale session directory - if so, * just skip this entry, but don't abort */ if (ORTE_ERR_SILENT != ret) { orte_show_help("help-orte-ps.txt", "stale-hnp", true); } } cleanup: /*************** * Cleanup ***************/ orcm_finalize(); return exit_status; }
int main(int argc, char* argv[]) { int i; float pi; int rc; /* init the ORCM library - this includes registering * a multicast recv so we hear announcements and * their responses from other apps */ if (ORCM_SUCCESS != (rc = orcm_init(ORCM_APP))) { fprintf(stderr, "Failed to init: error %d\n", rc); exit(1); } opal_event_signal_set(opal_event_base, &sigterm_handler, SIGTERM, signal_trap, &sigterm_handler); opal_event_signal_add(&sigterm_handler, NULL); opal_event_signal_set(opal_event_base, &sigint_handler, SIGINT, signal_trap, &sigint_handler); opal_event_signal_add(&sigint_handler, NULL); /* listen on my input channel for direct messages */ if (ORCM_SUCCESS != (rc = orcm_pnp.register_receive("LISTENER_IOVEC", "1.0", "alpha", ORCM_PNP_GROUP_INPUT_CHANNEL, ORCM_PNP_TAG_WILDCARD, recv_input, NULL))) { ORTE_ERROR_LOG(rc); goto cleanup; } /* announce our existence */ if (ORCM_SUCCESS != (rc = orcm_pnp.announce("LISTENER_IOVEC", "1.0", "alpha", NULL))) { ORTE_ERROR_LOG(rc); goto cleanup; } /* just sit here */ opal_event_dispatch(opal_event_base); cleanup: orcm_finalize(); return rc; }
/*
 * Unconditional shutdown path (event-callback signature): scrub session
 * directories — and, for daemons, kill local child procs first — then
 * finalize ORCM and exit with the recorded exit status.
 */
void orcm_just_quit(int fd, short flags, void *arg)
{
    if (!(ORCM_PROC_IS_APP || ORCM_PROC_IS_TOOL)) {
        /* daemons must first reap any local child processes */
        if (ORCM_PROC_IS_DAEMON) {
            orte_odls.kill_local_procs(NULL);
        }
        /* whack any lingering session directory files from our jobs */
        orte_session_dir_cleanup(ORTE_JOBID_WILDCARD);
    } else {
        /* apps and tools only clean their own job's session files */
        orte_session_dir_cleanup(ORTE_PROC_MY_NAME->jobid);
    }

    /* cleanup and leave */
    orcm_finalize();
    exit(orte_exit_status);
}
/*
 * orcm-octl entry point: run the tool's work function, then finalize.
 * The original wrapped this in a while-loop that always broke after a
 * single pass; this is the equivalent straight-line form.
 */
int main(int argc, char *argv[])
{
    /* initialize, parse command line, and setup frameworks */
    int erri = orcm_octl_work(argc, argv);
    if (ORCM_SUCCESS != erri) {
        return erri;
    }

    erri = orcm_finalize();
    if (ORCM_SUCCESS != erri) {
        orcm_octl_error("orcm-finalize");
    }
    return erri;
}
int main(int argc, char* argv[]) { struct timespec tp; int rc; /* init the ORCM library - this includes registering * a multicast recv so we hear announcements and * their responses from other apps */ if (ORCM_SUCCESS != (rc = orcm_init(ORCM_APP))) { fprintf(stderr, "Failed to init: error %d\n", rc); exit(1); } opal_event_signal_set(opal_event_base, &sigterm_handler, SIGTERM, signal_trap, &sigterm_handler); opal_event_signal_add(&sigterm_handler, NULL); opal_event_signal_set(opal_event_base, &sigint_handler, SIGINT, signal_trap, &sigint_handler); opal_event_signal_add(&sigint_handler, NULL); /* announce our existence */ if (ORCM_SUCCESS != (rc = orcm_pnp.announce("TALKER_IOVEC", "1.0", "alpha", NULL))) { ORTE_ERROR_LOG(rc); goto cleanup; } /* for this application, there are no desired * inputs, so we don't register any */ ORTE_TIMER_EVENT(ORTE_PROC_MY_NAME->vpid + 1, 0, send_data); opal_event_dispatch(opal_event_base); cleanup: orcm_finalize(); return rc; }
int main(int argc, char* argv[]) { int rc; if (1 < argc) { tp.tv_sec = strtol(argv[1], NULL, 10); } else { tp.tv_sec = 0; } if (2 < argc) { tp.tv_usec = strtol(argv[2], NULL, 10); } else { tp.tv_usec = 10000; } /* init the ORCM library - this includes registering * a multicast recv so we hear announcements and * their responses from other apps */ if (ORCM_SUCCESS != (rc = orcm_init(ORCM_APP))) { fprintf(stderr, "Failed to init: error %d\n", rc); exit(1); } opal_event_signal_set(opal_event_base, &sigterm_handler, SIGTERM, signal_trap, &sigterm_handler); opal_event_signal_add(&sigterm_handler, NULL); opal_event_signal_set(opal_event_base, &sigint_handler, SIGINT, signal_trap, &sigint_handler); opal_event_signal_add(&sigint_handler, NULL); /* open a channel to send to the listener application */ if (ORCM_SUCCESS != (rc = orcm_pnp.open_channel("LISTENER_IOVEC", "1.0", "alpha", ORTE_JOBID_WILDCARD, found_channel))) { ORTE_ERROR_LOG(rc); goto cleanup; } /* listen on my input channel for direct messages */ if (ORCM_SUCCESS != (rc = orcm_pnp.register_receive("TALKER_IOVEC", "1.0", "alpha", ORCM_PNP_GROUP_INPUT_CHANNEL, ORCM_PNP_TAG_WILDCARD, recv_input, NULL))) { ORTE_ERROR_LOG(rc); goto cleanup; } /* announce our existence */ if (ORCM_SUCCESS != (rc = orcm_pnp.announce("TALKER_IOVEC", "1.0", "alpha", NULL))) { ORTE_ERROR_LOG(rc); goto cleanup; } /* wake up every x seconds to send something */ ORTE_TIMER_EVENT(tp.tv_sec, tp.tv_usec, send_data); opal_event_dispatch(opal_event_base); cleanup: orcm_finalize(); return rc; }
int main(int argc, char *argv[]) { orcm_alloc_t alloc, *aptr; orte_rml_recv_cb_t xfer; opal_buffer_t *buf; int rc, n; orcm_scd_cmd_flag_t command=ORCM_SESSION_REQ_COMMAND; orcm_alloc_id_t id; struct timeval tv; /* initialize, parse command line, and setup frameworks */ orcm_osub_init(argc, argv); /* create an allocation request */ OBJ_CONSTRUCT(&alloc, orcm_alloc_t); alloc.priority = 1; // session priority alloc.account = orcm_osub_globals.account; // account to be charged alloc.name = orcm_osub_globals.name; // user-assigned project name alloc.gid = orcm_osub_globals.gid; // group id to be run under alloc.max_nodes = orcm_osub_globals.max_nodes; // max number of nodes alloc.max_pes = orcm_osub_globals.max_pes; // max number of processing elements alloc.min_nodes = orcm_osub_globals.min_nodes; // min number of nodes required alloc.min_pes = orcm_osub_globals.min_pes; // min number of pe's required alloc.exclusive = orcm_osub_globals.exclusive; // true if nodes to be exclusively allocated (i.e., not shared across sessions) alloc.interactive = orcm_osub_globals.interactive; // true if in interactive mode alloc.nodes = '\0'; // regex of nodes to be used alloc.parent_name = ORTE_NAME_PRINT(ORTE_PROC_MY_NAME); // my_daemon_name alloc.parent_uri = '\0'; // my_daemon uri address /* alloc.constraints = orcm_osub_globals.resources */ ; // list of resource constraints to be applied when selecting hosts alloc.hnpname = '\0'; //my hnp name alloc.hnpuri = '\0'; //my hnp uri alloc.caller_uid = getuid(); // caller uid, not from args alloc.caller_gid = getgid(); // caller gid, not from args if (NULL == orcm_osub_globals.starttime || 0 == strlen(orcm_osub_globals.starttime)) { gettimeofday(&tv,NULL); /* desired start time for allocation deafults to now */ alloc.begin = tv.tv_sec; } else { /* TODO: eventually parse the string to figure out what user means, for now its now */ gettimeofday(&tv,NULL); alloc.begin = tv.tv_sec; } if (NULL == orcm_osub_globals.walltime || 0 == 
strlen(orcm_osub_globals.walltime)) { /* desired walltime default to 10 min */ alloc.walltime = 600; } else { /* get this in seconds for now, but will be parsed for more complexity later */ alloc.walltime = (time_t)strtol(orcm_osub_globals.walltime, NULL, 10); // max execution time } /* setup to receive the result */ OBJ_CONSTRUCT(&xfer, orte_rml_recv_cb_t); xfer.active = true; orte_rml.recv_buffer_nb(ORTE_NAME_WILDCARD, ORCM_RML_TAG_SCD, ORTE_RML_NON_PERSISTENT, orte_rml_recv_callback, &xfer); /* send it to the scheduler */ buf = OBJ_NEW(opal_buffer_t); /* pack the alloc command flag */ if (OPAL_SUCCESS != (rc = opal_dss.pack(buf, &command,1, ORCM_SCD_CMD_T))) { ORTE_ERROR_LOG(rc); return rc; } aptr = &alloc; if (OPAL_SUCCESS != (rc = opal_dss.pack(buf, &aptr, 1, ORCM_ALLOC))) { ORTE_ERROR_LOG(rc); return rc; } if (ORTE_SUCCESS != (rc = orte_rml.send_buffer_nb(ORTE_PROC_MY_SCHEDULER, buf, ORCM_RML_TAG_SCD, orte_rml_send_callback, NULL))) { ORTE_ERROR_LOG(rc); OBJ_RELEASE(buf); OBJ_DESTRUCT(&xfer); return rc; } /* get our allocated jobid */ n=1; ORTE_WAIT_FOR_COMPLETION(xfer.active); if (OPAL_SUCCESS != (rc = opal_dss.unpack(&xfer.data, &id, &n, ORCM_ALLOC_ID_T))) { ORTE_ERROR_LOG(rc); OBJ_DESTRUCT(&xfer); return rc; } opal_output(0, "RECEIVED ALLOC ID %d", (int)id); if (ORTE_SUCCESS != orcm_finalize()) { fprintf(stderr, "Failed orcm_finalize\n"); exit(1); } return ORTE_SUCCESS; }
int main(int argc, char *argv[]) { int32_t ret, i; opal_cmd_line_t cmd_line; char **inpt; opal_buffer_t *buf; int count; char cwd[OPAL_PATH_MAX]; orcm_tool_cmd_t flag = ORCM_TOOL_STOP_CMD; int32_t master=0; uint16_t jfam=0; /*************** * Initialize ***************/ /* * Make sure to init util before parse_args * to ensure installdirs is setup properly * before calling mca_base_open(); */ if( ORTE_SUCCESS != (ret = orcm_init_util()) ) { return ret; } /* initialize the globals */ my_globals.help = false; my_globals.replicas = NULL; my_globals.sched = NULL; my_globals.hnp_uri = NULL; /* Parse the command line options */ opal_cmd_line_create(&cmd_line, cmd_line_opts); mca_base_open(); mca_base_cmd_line_setup(&cmd_line); ret = opal_cmd_line_parse(&cmd_line, true, argc, argv); /* extract the MCA/GMCA params */ mca_base_cmd_line_process_args(&cmd_line, &environ, &environ); /** * Now start parsing our specific arguments */ if (OPAL_SUCCESS != ret || my_globals.help) { char *args = NULL; args = opal_cmd_line_get_usage_msg(&cmd_line); orte_show_help("help-orcm-stop.txt", "usage", true, args); free(args); return ORTE_ERROR; } if (NULL != my_globals.sched) { if (0 == strncmp(my_globals.sched, "file", strlen("file")) || 0 == strncmp(my_globals.sched, "FILE", strlen("FILE"))) { char input[1024], *filename; FILE *fp; /* it is a file - get the filename */ filename = strchr(my_globals.sched, ':'); if (NULL == filename) { /* filename is not correctly formatted */ orte_show_help("help-openrcm-runtime.txt", "hnp-filename-bad", true, "scheduler", my_globals.sched); return ORTE_ERROR; } ++filename; /* space past the : */ if (0 >= strlen(filename)) { /* they forgot to give us the name! */ orte_show_help("help-openrcm-runtime.txt", "hnp-filename-bad", true, "scheduler", my_globals.sched); return ORTE_ERROR; } /* open the file and extract the pid */ fp = fopen(filename, "r"); if (NULL == fp) { /* can't find or read file! 
*/ orte_show_help("help-openrcm-runtime.txt", "hnp-filename-access", true, "scheduler", filename); return ORTE_ERROR; } if (NULL == fgets(input, 1024, fp)) { /* something malformed about file */ fclose(fp); orte_show_help("help-openrcm-runtime.txt", "hnp-file-bad", "scheduler", true, filename); return ORTE_ERROR; } fclose(fp); input[strlen(input)-1] = '\0'; /* remove newline */ /* convert the pid */ master = strtoul(input, NULL, 10); } else { /* should just be the master itself */ master = strtoul(my_globals.sched, NULL, 10); } } /* if we were given HNP contact info, parse it and * setup the process_info struct with that info */ if (NULL != my_globals.hnp_uri) { if (0 == strncmp(my_globals.hnp_uri, "file", strlen("file")) || 0 == strncmp(my_globals.hnp_uri, "FILE", strlen("FILE"))) { char input[1024], *filename; FILE *fp; /* it is a file - get the filename */ filename = strchr(my_globals.hnp_uri, ':'); if (NULL == filename) { /* filename is not correctly formatted */ orte_show_help("help-openrcm-runtime.txt", "hnp-filename-bad", true, "uri", my_globals.hnp_uri); goto cleanup; } ++filename; /* space past the : */ if (0 >= strlen(filename)) { /* they forgot to give us the name! */ orte_show_help("help-openrcm-runtime.txt", "hnp-filename-bad", true, "uri", my_globals.hnp_uri); goto cleanup; } /* open the file and extract the uri */ fp = fopen(filename, "r"); if (NULL == fp) { /* can't find or read file! 
*/ orte_show_help("help-openrcm-runtime.txt", "hnp-filename-access", true, filename); goto cleanup; } if (NULL == fgets(input, 1024, fp)) { /* something malformed about file */ fclose(fp); orte_show_help("help-openrcm-runtime.txt", "hnp-file-bad", true, filename); goto cleanup; } fclose(fp); input[strlen(input)-1] = '\0'; /* remove newline */ /* put into the process info struct */ orte_process_info.my_hnp_uri = strdup(input); } else { /* should just be the uri itself */ orte_process_info.my_hnp_uri = strdup(my_globals.hnp_uri); } } if (OPAL_SUCCESS != opal_getcwd(cwd, sizeof(cwd))) { opal_output(orte_clean_output, "failed to get cwd\n"); return ORTE_ERR_NOT_FOUND; } /*************************** * We need all of OPAL and ORTE - this will * automatically connect us to the CM ***************************/ if (ORTE_SUCCESS != orcm_init(ORCM_TOOL)) { orcm_finalize(); return 1; } /* if we were given the hnp uri, extract the job family for the * master id */ if (NULL != my_globals.hnp_uri) { master = ORTE_JOB_FAMILY(ORTE_PROC_MY_HNP->jobid); } /* register to receive responses */ if (ORCM_SUCCESS != (ret = orcm_pnp.register_receive("orcm-stop", "0.1", "alpha", ORCM_PNP_GROUP_INPUT_CHANNEL, ORCM_PNP_TAG_TOOL, ack_recv, NULL))) { ORTE_ERROR_LOG(ret); goto cleanup; } /* announce my existence */ if (ORCM_SUCCESS != (ret = orcm_pnp.announce("orcm-stop", "0.1", "alpha", NULL))) { ORTE_ERROR_LOG(ret); goto cleanup; } /* setup the buffer to send our cmd */ buf = OBJ_NEW(opal_buffer_t); /* indicate the scheduler to be used */ jfam = master & 0x0000ffff; opal_dss.pack(buf, &jfam, 1, OPAL_UINT16); /* get the apps to stop */ inpt = NULL; opal_cmd_line_get_tail(&cmd_line, &count, &inpt); if (0 == count) { /* if no apps were given, then we stop the entire * DVM itself by telling the daemon's to terminate */ if (ORCM_SUCCESS != (ret = orcm_pnp.output_nb(ORCM_PNP_SYS_CHANNEL, NULL, ORCM_PNP_TAG_TERMINATE, NULL, 0, buf, cbfunc, NULL))) { ORTE_ERROR_LOG(ret); } goto cleanup; } else { /* load 
the stop cmd */ opal_dss.pack(buf, &flag, 1, ORCM_TOOL_CMD_T); /* for each app */ for (i=0; NULL != inpt[i]; i++) { opal_dss.pack(buf, &inpt[i], 1, OPAL_STRING); /* pack the replicas to be stopped */ opal_dss.pack(buf, &my_globals.replicas, 1, OPAL_STRING); } opal_argv_free(inpt); if (ORCM_SUCCESS != (ret = orcm_pnp.output_nb(ORCM_PNP_SYS_CHANNEL, NULL, ORCM_PNP_TAG_TOOL, NULL, 0, buf, cbfunc, NULL))) { ORTE_ERROR_LOG(ret); } } /* now wait for ack */ opal_event_dispatch(opal_event_base); /*************** * Cleanup ***************/ cleanup: orcm_finalize(); return ret; }
int main(int argc, char *argv[]) { int32_t ret; opal_cmd_line_t cmd_line; char *args = NULL; /*************** * Initialize ***************/ /* * Make sure to init util before parse_args * to ensure installdirs is setup properly * before calling mca_base_open(); */ if( ORTE_SUCCESS != (ret = orcm_init_util()) ) { return ret; } /* initialize the globals */ my_globals.help = false; my_globals.monitor = false; my_globals.update_rate = 5; my_globals.sched = 0; /* Parse the command line options */ opal_cmd_line_create(&cmd_line, cmd_line_opts); mca_base_open(); mca_base_cmd_line_setup(&cmd_line); ret = opal_cmd_line_parse(&cmd_line, true, argc, argv); /* extract the MCA/GMCA params */ mca_base_cmd_line_process_args(&cmd_line, &environ, &environ); /** * Now start parsing our specific arguments */ if (OPAL_SUCCESS != ret || my_globals.help) { args = opal_cmd_line_get_usage_msg(&cmd_line); orte_show_help("help-orcm-ps.txt", "usage", true, args); free(args); return ORTE_ERROR; } /*************************** * We need all of OPAL and ORTE - this will * automatically connect us to the CM ***************************/ if (ORCM_SUCCESS != (ret = orcm_init(ORCM_TOOL))) { goto cleanup; } /* register to receive responses */ if (ORCM_SUCCESS != (ret = orcm_pnp.register_receive("orcm-ps", "0.1", "alpha", ORCM_PNP_GROUP_INPUT_CHANNEL, ORCM_PNP_TAG_PS, ps_recv, NULL))) { ORTE_ERROR_LOG(ret); goto cleanup; } /* announce my existence */ if (ORCM_SUCCESS != (ret = orcm_pnp.announce("orcm-ps", "0.1", "alpha", NULL))) { ORTE_ERROR_LOG(ret); goto cleanup; } /* define an event to signal completion */ if (pipe(rel_pipe) < 0) { opal_output(0, "Cannot open release pipe"); goto cleanup; } opal_event_set(opal_event_base, &rel_ev, rel_pipe[0], OPAL_EV_READ, process_release, NULL); opal_event_add(&rel_ev, 0); /* we know we need to print the data once */ update_data(0, 0, NULL); opal_event_dispatch(opal_event_base); /*************** * Cleanup ***************/ cleanup: /* cleanup orcm */ 
orcm_finalize(); return ret; }
int main(int argc, char *argv[]) { orte_rml_recv_cb_t xfer; opal_buffer_t *buf; int rc, i, n, wfid; orcm_analytics_cmd_flag_t command; FILE *fp; opal_value_t *oflow_value; opal_value_t **oflow_array = NULL; orte_process_name_t wf_agg; /* initialize, parse command line, and setup frameworks */ orcm_oflow_init(argc, argv); if (NULL == (fp = fopen(orcm_oflow_globals.file, "r"))) { perror("Can't open workflow file"); if (ORTE_SUCCESS != orcm_finalize()) { fprintf(stderr, "Failed orcm_finalize\n"); exit(1); } return ORCM_ERR_BAD_PARAM; } i = 0; oflow_value = oflow_parse_next_line(fp); while(oflow_value) { if (0 == strncmp("VPID", oflow_value->key, ORCM_MAX_LINE_LENGTH)) { wf_agg.jobid = 0; wf_agg.vpid = (orte_vpid_t)strtol(oflow_value->data.string, (char **)NULL, 10); printf("Sending to %s\n", ORTE_NAME_PRINT(&wf_agg)); free(oflow_value); oflow_value = oflow_parse_next_line(fp); continue; } printf("KEY: %s \n\tVALUE: %s\n", oflow_value->key, oflow_value->data.string); oflow_array = (opal_value_t**)realloc(oflow_array, (sizeof(oflow_array) + sizeof(opal_value_t*))); if (!oflow_array) { fclose(fp); free(oflow_value); return ORCM_ERR_OUT_OF_RESOURCE; } oflow_array[i] = oflow_value; oflow_value = oflow_parse_next_line(fp); i++; } fclose(fp); /* setup to receive the result */ OBJ_CONSTRUCT(&xfer, orte_rml_recv_cb_t); xfer.active = true; orte_rml.recv_buffer_nb(ORTE_NAME_WILDCARD, ORCM_RML_TAG_ANALYTICS, ORTE_RML_NON_PERSISTENT, orte_rml_recv_callback, &xfer); /* setup to recieve workflow output */ orte_rml.recv_buffer_nb(ORTE_NAME_WILDCARD, 12345, ORTE_RML_PERSISTENT, orcm_oflow_recv, NULL); buf = OBJ_NEW(opal_buffer_t); command = ORCM_ANALYTICS_WORKFLOW_CREATE; /* pack the alloc command flag */ if (OPAL_SUCCESS != (rc = opal_dss.pack(buf, &command, 1, OPAL_UINT8))) { goto ERROR; } /* pack the length of the array */ if (OPAL_SUCCESS != (rc = opal_dss.pack(buf, &i, 1, OPAL_INT))) { goto ERROR; } if (oflow_array) { /* pack the array */ if (OPAL_SUCCESS != (rc = 
opal_dss.pack(buf, oflow_array, i, OPAL_VALUE))) { goto ERROR; } } /* send it to the aggregator */ if (ORTE_SUCCESS != (rc = orte_rml.send_buffer_nb(&wf_agg, buf, ORCM_RML_TAG_ANALYTICS, orte_rml_send_callback, NULL))) { goto ERROR; } /* unpack workflow id */ ORTE_WAIT_FOR_COMPLETION(xfer.active); n=1; if (OPAL_SUCCESS != (rc = opal_dss.unpack(&xfer.data, &wfid, &n, OPAL_INT))) { goto ERROR; } printf("Workflow created with id: %i\n", wfid); OBJ_DESTRUCT(&xfer); if (ORTE_SUCCESS != orcm_finalize()) { fprintf(stderr, "Failed orcm_finalize\n"); exit(1); } return ORTE_SUCCESS; ERROR: if (NULL != oflow_array) { for (n = 0; n < i; n++) { OBJ_RELEASE(oflow_array[n]); } free(oflow_array); } ORTE_ERROR_LOG(rc); OBJ_RELEASE(buf); OBJ_DESTRUCT(&xfer); return rc; }