Example #1
int smpd_post_abort_command(char *fmt, ...)
{
    int result;
    char error_str[2048] = "";
    smpd_command_t *cmd_ptr;
    smpd_context_t *context;
    va_list list;

    smpd_enter_fn(FCNAME);

    va_start(list, fmt);
    vsnprintf(error_str, 2048, fmt, list);
    va_end(list);

    result = smpd_create_command("abort", smpd_process.id, 0, SMPD_FALSE, &cmd_ptr);
    if (result != SMPD_SUCCESS)
    {
	smpd_err_printf("unable to create an abort command.\n");
	smpd_exit_fn(FCNAME);
	return SMPD_FAIL;
    }
    result = smpd_add_command_arg(cmd_ptr, "error", error_str);
    if (result != SMPD_SUCCESS)
    {
	smpd_err_printf("Unable to add the error string to the abort command.\n");
	smpd_exit_fn(FCNAME);
	return SMPD_FAIL;
    }
    result = smpd_command_destination(0, &context);
    if(result != SMPD_SUCCESS){
        smpd_err_printf("Unable to find destination for command...Aborting: %s\n", error_str);
        smpd_exit_fn(FCNAME);
        return SMPD_FAIL;
    }

    if (context == NULL)
    {
	if (smpd_process.left_context == NULL)
	{
	    printf("Aborting: %s\n", error_str);
	    fflush(stdout);
	    smpd_exit_fn(FCNAME);
	    smpd_exit(-1);
	}

	smpd_process.closing = SMPD_TRUE;
	result = smpd_create_command("close", 0, 1, SMPD_FALSE, &cmd_ptr);
	if (result != SMPD_SUCCESS)
	{
	    smpd_err_printf("unable to create the close command to tear down the job tree.\n");
	    smpd_exit_fn(FCNAME);
	    return SMPD_FAIL;
	}
	result = smpd_post_write_command(smpd_process.left_context, cmd_ptr);
	if (result != SMPD_SUCCESS)
	{
	    smpd_err_printf("unable to post a write of the close command to tear down the job tree as part of the abort process.\n");
	    smpd_exit_fn(FCNAME);
	    return SMPD_FAIL;
	}
    }
    else
    {
	smpd_dbg_printf("sending abort command to %s context: \"%s\"\n", smpd_get_context_str(context), cmd_ptr->cmd);
	result = smpd_post_write_command(context, cmd_ptr);
	if (result != SMPD_SUCCESS)
	{
	    smpd_err_printf("unable to post a write of the abort command to the %s context.\n", smpd_get_context_str(context));
	    smpd_exit_fn(FCNAME);
	    return SMPD_FAIL;
	}
    }
    smpd_exit_fn(FCNAME);
    return SMPD_SUCCESS;
}
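
The function above formats its error text with the standard va_start / vsnprintf / va_end idiom before attaching it to the "abort" command. The stand-alone sketch below isolates just that variadic-formatting pattern; the name post_error_message is hypothetical and not part of SMPD, and the handler simply prints instead of posting a command.

#include <stdarg.h>
#include <stdio.h>

/* Hypothetical helper illustrating the bounded variadic formatting used above. */
static void post_error_message(const char *fmt, ...)
{
    char error_str[2048] = "";
    va_list list;

    va_start(list, fmt);
    /* vsnprintf truncates instead of overflowing and always NUL-terminates. */
    vsnprintf(error_str, sizeof(error_str), fmt, list);
    va_end(list);

    fprintf(stderr, "Aborting: %s\n", error_str);
}

int main(void)
{
    post_error_message("unable to launch rank %d on host %s", 3, "node17");
    return 0;
}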
Example #2
int mpiexec_rsh()
{
    int i;
    smpd_launch_node_t *launch_node_ptr;
    smpd_process_t *process, **processes;
    int result;
    char *iter1, *iter2;
    char exe[SMPD_MAX_EXE_LENGTH];
    char *p;
    char ssh_cmd[100] = "ssh -x";
    SMPDU_Sock_set_t set;
    SMPD_BOOL escape_escape = SMPD_TRUE;
    char *env_str;
    int maxlen;
    SMPDU_Sock_t abort_sock;
    smpd_context_t *abort_context = NULL;
    smpd_command_t *cmd_ptr;
    PROCESS_HANDLE_TYPE hnd;

    smpd_enter_fn("mpiexec_rsh");

#ifdef HAVE_WINDOWS_H
    SetConsoleCtrlHandler(mpiexec_rsh_handler, TRUE);
#else
    /* set up a signal handler? */
#endif

    p = getenv("MPIEXEC_RSH");
    if (p != NULL && strlen(p) > 0){
	    strncpy(ssh_cmd, p, sizeof(ssh_cmd) - 1);
	    ssh_cmd[sizeof(ssh_cmd) - 1] = '\0'; /* strncpy does not guarantee termination */
    }

    p = getenv("MPIEXEC_RSH_NO_ESCAPE");
    if (p != NULL){
	    if (smpd_is_affirmative(p) || strcmp(p, "1") == 0){
	        escape_escape = SMPD_FALSE;
	    }
    }

    result = SMPDU_Sock_create_set(&set);
    if (result != SMPD_SUCCESS){
	    smpd_err_printf("unable to create a set for the mpiexec_rsh.\n");
	    smpd_exit_fn("mpiexec_rsh");
	    return SMPD_FAIL;
    }

    smpd_process.nproc = smpd_process.launch_list->nproc;

    if (smpd_process.use_pmi_server){
	    result = start_pmi_server(smpd_process.launch_list->nproc, root_host, 100, &root_port);
	    if (result != SMPD_SUCCESS){
	        smpd_err_printf("mpiexec_rsh is unable to start the local pmi server.\n");
	        smpd_exit_fn("mpiexec_rsh");
	        return SMPD_FAIL;
	    }
	    smpd_dbg_printf("the pmi server is listening on %s:%d\n", root_host, root_port);
    }
    else{
	    /* start the root smpd */
	    result = start_root_smpd(root_host, SMPD_MAX_HOST_LENGTH, &root_port, &hnd);
	    if (result != SMPD_SUCCESS){
		    smpd_err_printf("mpiexec_rsh is unable to start the root smpd.\n");
		    smpd_exit_fn("mpiexec_rsh");
		    return SMPD_FAIL;
	    }
	    smpd_dbg_printf("the root smpd is listening on %s:%d\n", root_host, root_port);

	    /* create a connection to the root smpd used to abort the job */
	    result = ConnectToHost(root_host, root_port, SMPD_CONNECTING_RPMI, set, &abort_sock, &abort_context);
	    if (result != SMPD_SUCCESS){
	        smpd_exit_fn("mpiexec_rsh");
	        return SMPD_FAIL;
	    }
    }

    processes = (smpd_process_t**)MPIU_Malloc(sizeof(smpd_process_t*) * smpd_process.launch_list->nproc);
    if (processes == NULL){
	    smpd_err_printf("unable to allocate process array.\n");
	    smpd_exit_fn("mpiexec_rsh");
	    return SMPD_FAIL;
    }

    launch_node_ptr = smpd_process.launch_list;
    for (i=0; i<smpd_process.launch_list->nproc; i++){
	    if (launch_node_ptr == NULL){
		    smpd_err_printf("Error: not enough launch nodes.\n");
		    smpd_exit_fn("mpiexec_rsh");
		    return SMPD_FAIL;
	    }

	    /* initialize process structure */
	    result = smpd_create_process_struct(i, &process);
	    if (result != SMPD_SUCCESS){
		    smpd_err_printf("unable to create a process structure.\n");
		    smpd_exit_fn("mpiexec_rsh");
		    return SMPD_FAIL;
	    }
	    /* no need for a pmi context */
	    if (process->pmi){
		    smpd_free_context(process->pmi);
        }
	    process->pmi = NULL;
	    /* change stdout and stderr to rsh behavior: 
	     * write stdout/err directly to stdout/err instead of creating
	     * an smpd stdout/err command 
	     */
	    if (process->out != NULL){
	        process->out->type = SMPD_CONTEXT_STDOUT_RSH;
	    }
	    if (process->err != NULL){
	        process->err->type = SMPD_CONTEXT_STDERR_RSH;
	    }
	    MPIU_Strncpy(process->clique, launch_node_ptr->clique, SMPD_MAX_CLIQUE_LENGTH);
	    MPIU_Strncpy(process->dir, launch_node_ptr->dir, SMPD_MAX_DIR_LENGTH);
	    MPIU_Strncpy(process->domain_name, smpd_process.kvs_name, SMPD_MAX_DBS_NAME_LEN);
	    MPIU_Strncpy(process->env, launch_node_ptr->env, SMPD_MAX_ENV_LENGTH);
	    if (escape_escape == SMPD_TRUE && smpd_process.mpiexec_run_local != SMPD_TRUE){
		    /* convert \ to \\ to make cygwin ssh happy */
		    iter1 = launch_node_ptr->exe;
		    iter2 = exe;
		    while (*iter1){
			    if (*iter1 == '\\'){
				    *iter2 = *iter1;
				    iter2++;
				    *iter2 = *iter1;
			    }
			    else{
				    *iter2 = *iter1;
			    }
			    iter1++;
			    iter2++;
		    }
		    *iter2 = '\0';
		    /*printf("[%s] -> [%s]\n", launch_node_ptr->exe, exe);*/
	    }
	    else{
	        MPIU_Strncpy(exe, launch_node_ptr->exe, SMPD_MAX_EXE_LENGTH);
	    }

	    /* Two samples for testing on the local machine */

	    /* static rPMI initialization */
	    /*sprintf(process->exe, "env PMI_RANK=%d PMI_SIZE=%d PMI_KVS=%s PMI_ROOT_HOST=%s PMI_ROOT_PORT=8888 PMI_ROOT_LOCAL=1 PMI_APPNUM=%d %s",
		    launch_node_ptr->iproc, launch_node_ptr->nproc, smpd_process.kvs_name, root_host, launch_node_ptr->appnum, exe);*/

	    /* dynamic rPMI initialization */
	    /*sprintf(process->exe, "env PMI_RANK=%d PMI_SIZE=%d PMI_KVS=%s PMI_ROOT_HOST=%s PMI_ROOT_PORT=%d PMI_ROOT_LOCAL=0 PMI_APPNUM=%d %s",
		    launch_node_ptr->iproc, launch_node_ptr->nproc, smpd_process.kvs_name, root_host, root_port, launch_node_ptr->appnum, exe);*/

	    if (smpd_process.mpiexec_run_local == SMPD_TRUE){
		    /* -localonly option and dynamic rPMI initialization */
		    env_str = &process->env[strlen(process->env)];
		    maxlen = (int)(SMPD_MAX_ENV_LENGTH - strlen(process->env));
		    MPIU_Str_add_int_arg(&env_str, &maxlen, "PMI_RANK", launch_node_ptr->iproc);
		    MPIU_Str_add_int_arg(&env_str, &maxlen, "PMI_SIZE", launch_node_ptr->nproc);
		    MPIU_Str_add_string_arg(&env_str, &maxlen, "PMI_KVS", smpd_process.kvs_name);
		    MPIU_Str_add_string_arg(&env_str, &maxlen, "PMI_ROOT_HOST", root_host);
		    MPIU_Str_add_int_arg(&env_str, &maxlen, "PMI_ROOT_PORT", root_port);
		    MPIU_Str_add_string_arg(&env_str, &maxlen, "PMI_ROOT_LOCAL", "0");
		    MPIU_Str_add_int_arg(&env_str, &maxlen, "PMI_APPNUM", launch_node_ptr->appnum);
		    MPIU_Strncpy(process->exe, exe, SMPD_MAX_EXE_LENGTH);
    	}
	    else{
		    /* ssh and dynamic rPMI initialization */
		    char fmtEnv[SMPD_MAX_ENV_LENGTH];
		    int fmtEnvLen = SMPD_MAX_ENV_LENGTH;
		    char *pExe = process->exe;
		    int curLen = 0;
		    MPIU_Snprintf(pExe, SMPD_MAX_EXE_LENGTH, "%s %s env", ssh_cmd, launch_node_ptr->hostname);
		    curLen = strlen(process->exe);
		    pExe = process->exe + curLen;
		    if (FmtEnvVarsForSSH(launch_node_ptr->env, fmtEnv, fmtEnvLen)){
			    /* Add user specified env vars */
			    MPIU_Snprintf(pExe, SMPD_MAX_EXE_LENGTH - curLen, "%s", fmtEnv);
			    curLen = strlen(process->exe);
			    pExe = process->exe + curLen;
		    }
		    MPIU_Snprintf(pExe, SMPD_MAX_EXE_LENGTH - curLen, " \"PMI_RANK=%d\" \"PMI_SIZE=%d\" \"PMI_KVS=%s\" \"PMI_ROOT_HOST=%s\" \"PMI_ROOT_PORT=%d\" \"PMI_ROOT_LOCAL=0\" \"PMI_APPNUM=%d\" %s",
		    launch_node_ptr->iproc, launch_node_ptr->nproc, smpd_process.kvs_name, root_host, root_port, launch_node_ptr->appnum, exe);
	    }

	    MPIU_Strncpy(process->kvs_name, smpd_process.kvs_name, SMPD_MAX_DBS_NAME_LEN);
	    process->nproc = launch_node_ptr->nproc;
	    MPIU_Strncpy(process->path, launch_node_ptr->path, SMPD_MAX_PATH_LENGTH);

	    /* call smpd_launch_process */
	    smpd_dbg_printf("launching: %s\n", process->exe);
	    result = smpd_launch_process(process, SMPD_DEFAULT_PRIORITY_CLASS, SMPD_DEFAULT_PRIORITY, SMPD_FALSE, set);
	    if (result != SMPD_SUCCESS){
		    smpd_err_printf("unable to launch process %d <%s>.\n", i, process->exe);
		    smpd_exit_fn("mpiexec_rsh");
		    return SMPD_FAIL;
	    }
	    /* save the new process in the list */
	    process->next = smpd_process.process_list;
	    smpd_process.process_list = process;
	    if (i == 0){
		    /* start the stdin redirection thread to the first process */
		    setup_stdin_redirection(process, set);
	    }

	    smpd_process.nproc_launched++;
	    processes[i] = process;
	    launch_node_ptr = launch_node_ptr->next;
    } /* for (i=0; i<smpd_process.launch_list->nproc; i++) */
    
    if (launch_node_ptr != NULL){
	    smpd_err_printf("Error: too many launch nodes.\n");
	    smpd_exit_fn("mpiexec_rsh");
	    return SMPD_FAIL;
    }

    /* Start the timeout mechanism if specified */
    if (smpd_process.timeout > 0){
	    smpd_context_t *reader_context;
	    SMPDU_Sock_t sock_reader;
	    SMPDU_SOCK_NATIVE_FD reader, writer;
#ifdef HAVE_WINDOWS_H
	    /*SOCKET reader, writer;*/
	    smpd_make_socket_loop((SOCKET*)&reader, (SOCKET*)&writer);
#else
	    /*int reader, writer;*/
	    int pair[2];
	    socketpair(AF_UNIX, SOCK_STREAM, 0, pair);
	    reader = pair[0];
	    writer = pair[1];
#endif
	    result = SMPDU_Sock_native_to_sock(set, reader, NULL, &sock_reader);
	    result = SMPDU_Sock_native_to_sock(set, writer, NULL, &smpd_process.timeout_sock);
	    result = smpd_create_context(SMPD_CONTEXT_TIMEOUT, set, sock_reader, -1, &reader_context);
	    reader_context->read_state = SMPD_READING_TIMEOUT;
	    result = SMPDU_Sock_post_read(sock_reader, &reader_context->read_cmd.cmd, 1, 1, NULL);
#ifdef HAVE_WINDOWS_H
	    /* create a Windows thread to sleep until the timeout expires */
	    smpd_process.timeout_thread = CreateThread(NULL, 0, (LPTHREAD_START_ROUTINE)timeout_thread, NULL, 0, NULL);
	    if (smpd_process.timeout_thread == NULL){
		    printf("Error: unable to create a timeout thread, errno %d.\n", GetLastError());
		    smpd_exit_fn("mp_parse_command_args");
		    return SMPD_FAIL;
	    }
#else /* HAVE_WINDOWS_H */
#ifdef SIGALRM
	    /* create an alarm to signal mpiexec when the timeout expires */
	    smpd_signal(SIGALRM, timeout_function);
	    alarm(smpd_process.timeout);
#else /* SIGALRM */
#ifdef HAVE_PTHREAD_H
	    /* create a pthread to sleep until the timeout expires */
	    result = pthread_create(&smpd_process.timeout_thread, NULL, timeout_thread, NULL);
	    if (result != 0){
		    printf("Error: unable to create a timeout thread, errno %d.\n", result);
		    smpd_exit_fn("mp_parse_command_args");
		    return SMPD_FAIL;
	    }
#else /* HAVE_PTHREAD_H */
	/* no timeout mechanism available */
#endif /* HAVE_PTHREAD_H */
#endif /* SIGALRM */
#endif /* HAVE_WINDOWS_H */
    } /* if (smpd_process.timeout > 0) */

    result = smpd_enter_at_state(set, SMPD_IDLE);
    if (result != SMPD_SUCCESS){
	    smpd_err_printf("mpiexec_rsh state machine failed.\n");
	    smpd_exit_fn("mpiexec_rsh");
	    return SMPD_FAIL;
    }

    if (smpd_process.use_pmi_server){
	    result = stop_pmi_server();
	    if (result != SMPD_SUCCESS){
		    smpd_err_printf("mpiexec_rsh unable to stop the pmi server.\n");
		    smpd_exit_fn("mpiexec_rsh");
		    return SMPD_FAIL;
	    }
    }
    else{
	    /* Send an abort command to the root_smpd thread/process to ensure that it exits.
	     * This only needs to be sent when there is an error or a failed process,
	     * but it is safe to send it in all cases.
	     */
	    result = smpd_create_command("abort", 0, 0, SMPD_FALSE, &cmd_ptr);
	    if (result != SMPD_SUCCESS){
		    smpd_err_printf("unable to create an abort command.\n");
		    smpd_exit_fn("mpiexec_rsh");
		    return SMPD_FAIL;
	    }
	    result = smpd_post_write_command(abort_context, cmd_ptr);
	    if (result != SMPD_SUCCESS){
		    /* Only print this as a debug message instead of an error because the root_smpd thread/process may have already exited. */
		    smpd_dbg_printf("unable to post a write of the abort command to the %s context.\n", smpd_get_context_str(abort_context));
		    smpd_exit_fn("mpiexec_rsh");
		    return SMPD_FAIL;
	    }

	    result = stop_root_smpd(hnd);
	    if (result != PMI_SUCCESS){
		    smpd_err_printf("mpiexec_rsh unable to stop the root smpd.\n");
		    smpd_exit_fn("mpiexec_rsh");
		    return SMPD_FAIL;
	    }
    }

    smpd_exit_fn("mpiexec_rsh");
    return 0;
}
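
When the job is not run with -localonly, the loop in the example above doubles every backslash in the executable path so it survives being passed through ssh (the "make cygwin ssh happy" comment). Below is a self-contained sketch of that escaping step with an explicit bounds check; escape_backslashes is a hypothetical helper name, while the real code inlines the loop and writes into a fixed-size exe buffer.

#include <stdio.h>

/* Hypothetical helper: copy src to dst, doubling each backslash, never
 * writing more than dst_len bytes including the terminating NUL. */
static void escape_backslashes(const char *src, char *dst, size_t dst_len)
{
    size_t j = 0;

    while (*src != '\0' && j + 2 < dst_len) {
        if (*src == '\\') {
            dst[j++] = '\\';   /* emit the extra escape character first */
        }
        dst[j++] = *src++;     /* then copy the original character */
    }
    dst[j] = '\0';
}

int main(void)
{
    char out[256];

    escape_backslashes("C:\\Program Files\\app.exe", out, sizeof(out));
    printf("%s\n", out);       /* prints: C:\\Program Files\\app.exe */
    return 0;
}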
Example #3
int main(int argc, char* argv[])
{
    int result = SMPD_SUCCESS;
    smpd_host_node_t *host_node_ptr;
    smpd_launch_node_t *launch_node_ptr;
    smpd_context_t *context;
    SMPDU_Sock_set_t set;
    SMPDU_Sock_t sock = SMPDU_SOCK_INVALID_SOCK;
    smpd_state_t state;

    smpd_enter_fn("main");

    /* catch an empty command line */
    if (argc < 2)
    {
	mp_print_options();
	exit(0);
    }

    smpd_process.mpiexec_argv0 = argv[0];

    /* initialize */
    /* FIXME: Get rid of this hack - we already create 
     * local KVS for all singleton clients by default
     */
    putenv("PMI_SMPD_FD=0");
    result = PMPI_Init(&argc, &argv);
    /* SMPD_CS_ENTER(); */
    if (result != SMPD_SUCCESS)
    {
	smpd_err_printf("SMPD_Init failed,\nerror: %d\n", result);
	smpd_exit_fn("main");
	return result;
    }

    result = SMPDU_Sock_init();
    if (result != SMPD_SUCCESS)
    {
	smpd_err_printf("SMPDU_Sock_init failed,\nsock error: %s\n",
		      get_sock_error_string(result));
	smpd_exit_fn("main");
	return result;
    }

    result = smpd_init_process();
    if (result != SMPD_SUCCESS)
    {
	smpd_err_printf("smpd_init_process failed.\n");
	goto quit_job;
    }

    smpd_process.dbg_state = SMPD_DBG_STATE_ERROUT;

    /* parse the command line */
    smpd_dbg_printf("parsing the command line.\n");
    result = mp_parse_command_args(&argc, &argv);
    if (result != SMPD_SUCCESS)
    {
	smpd_err_printf("Unable to parse the mpiexec command arguments.\n");
	goto quit_job;
    }

    /* If we are using MS HPC job scheduler we only connect
     * to the local SMPD
     */
    if(smpd_process.use_ms_hpc){
        char host[100];
        int id;
        /* Free the current host list */
        result = smpd_free_host_list();
        if(result != SMPD_SUCCESS){
            smpd_err_printf("Unable to free the global host list\n");
            goto quit_job;
        }
        /* Add local host to the host list */
        result = smpd_get_hostname(host, 100);
        if(result != SMPD_SUCCESS){
            smpd_err_printf("Unable to get the local hostname\n");
            goto quit_job;
        }
	    result = smpd_get_host_id(host, &id);
        if(result != SMPD_SUCCESS){
            smpd_err_printf("Unable to get host id for local host\n");
            goto quit_job;
        }
        /* Set the number of PMI procs since they are not launched by mpiexec */
        smpd_process.nproc = smpd_process.launch_list->nproc;
        smpd_dbg_printf("Adding (%s:%d) == (localhost) to the host list\n", host, id);
    }

    /* print and see what we've got */
    /* debugging output *************/
    smpd_dbg_printf("host tree:\n");
    host_node_ptr = smpd_process.host_list;
    if (!host_node_ptr)
	smpd_dbg_printf("<none>\n");
    while (host_node_ptr)
    {
	smpd_dbg_printf(" host: %s, parent: %d, id: %d\n",
	    host_node_ptr->host,
	    host_node_ptr->parent, host_node_ptr->id);
	host_node_ptr = host_node_ptr->next;
    }
    smpd_dbg_printf("launch nodes:\n");
    launch_node_ptr = smpd_process.launch_list;
    if (!launch_node_ptr)
	smpd_dbg_printf("<none>\n");
    while (launch_node_ptr)
    {
	smpd_dbg_printf(" iproc: %d, id: %d, exe: %s\n",
	    launch_node_ptr->iproc, launch_node_ptr->host_id,
	    launch_node_ptr->exe);
	launch_node_ptr = launch_node_ptr->next;
    }
    /* end debug output *************/

    /* set the id of the mpiexec node to zero */
    smpd_process.id = 0;

    result = SMPDU_Sock_create_set(&set);
    if (result != SMPD_SUCCESS)
    {
	smpd_err_printf("SMPDU_Sock_create_set failed,\nsock error: %s\n", get_sock_error_string(result));
	goto quit_job;
    }
    smpd_process.set = set;

    /* Check to see if the user wants to use a remote shell mechanism for launching the processes
     * instead of using the smpd process managers.
     */
    if (smpd_process.rsh_mpiexec == SMPD_TRUE)
    {
	/* Do rsh or localonly stuff */
	result = mpiexec_rsh();

	/* skip over the non-rsh code and go to the cleanup section */
	goto quit_job;
    }

    /* Start the timeout mechanism if specified */
    /* This code occurs after the rsh_mpiexec option check because the rsh code implements timeouts differently */
    if (smpd_process.timeout > 0)
    {
#ifdef HAVE_WINDOWS_H
	/* create a Windows thread to sleep until the timeout expires */
	if (smpd_process.timeout_thread == NULL)
	{
	    smpd_process.timeout_thread = CreateThread(NULL, 0, (LPTHREAD_START_ROUTINE)timeout_thread, NULL, 0, NULL);
	    if (smpd_process.timeout_thread == NULL)
	    {
		printf("Error: unable to create a timeout thread, errno %d.\n", GetLastError());
		smpd_exit_fn("mp_parse_command_args");
		return SMPD_FAIL;
	    }
	}
#elif defined(SIGALRM)
	/* create an alarm to signal mpiexec when the timeout expires */
	smpd_signal(SIGALRM, timeout_function);
	alarm(smpd_process.timeout);
#elif defined(HAVE_PTHREAD_H)
	/* create a pthread to sleep until the timeout expires */
	result = pthread_create(&smpd_process.timeout_thread, NULL, timeout_thread, NULL);
	if (result != 0)
	{
	    printf("Error: unable to create a timeout thread, errno %d.\n", result);
	    smpd_exit_fn("mp_parse_command_args");
	    return SMPD_FAIL;
	}
#else
	/* no timeout mechanism available */
#endif
    }

    /* make sure we have a passphrase to authenticate connections to the smpds */
    if (smpd_process.passphrase[0] == '\0')
	smpd_get_smpd_data("phrase", smpd_process.passphrase, SMPD_PASSPHRASE_MAX_LENGTH);
    if (smpd_process.passphrase[0] == '\0')
    {
	if (smpd_process.noprompt)
	{
	    printf("Error: No smpd passphrase specified through the registry or .smpd file, exiting.\n");
	    result = SMPD_FAIL;
	    goto quit_job;
	}
	printf("Please specify an authentication passphrase for smpd: ");
	fflush(stdout);
	smpd_get_password(smpd_process.passphrase);
    }

    /* set the state to create a console session or a job session */
    state = smpd_process.do_console ? SMPD_MPIEXEC_CONNECTING_SMPD : SMPD_MPIEXEC_CONNECTING_TREE;

    result = smpd_create_context(SMPD_CONTEXT_LEFT_CHILD, set, sock, 1, &context);
    if (result != SMPD_SUCCESS)
    {
	smpd_err_printf("unable to create a context for the first host in the tree.\n");
	goto quit_job;
    }
#ifdef HAVE_WINDOWS_H
    if (!smpd_process.local_root)
    {
#endif
	/* start connecting the tree by posting a connect to the first host */
	result = SMPDU_Sock_post_connect(set, context, smpd_process.host_list->host, smpd_process.port, &sock);
	if (result != SMPD_SUCCESS)
	{
	    smpd_err_printf("Unable to connect to '%s:%d',\nsock error: %s\n",
		smpd_process.host_list->host, smpd_process.port, get_sock_error_string(result));
	    goto quit_job;
	}
#ifdef HAVE_WINDOWS_H
    }
#endif
    context->sock = sock;
    context->state = state;
    context->connect_to = smpd_process.host_list;
#ifdef HAVE_WINDOWS_H
    if (smpd_process.local_root)
    {
	int port;
	smpd_context_t *rc_context;

	/* The local_root option is implemented by having mpiexec act as the smpd
	 * and launch the smpd manager.  Then mpiexec connects to this manager just
	 * as if it had been created by a real smpd.  This causes all the processes
	 * destined for the first smpd host to be launched by this child process of
	 * mpiexec and not the smpd service.  This allows for these processes to
	 * create windows that are visible to the interactive user.  It also means 
	 * that the job cannot be run in the context of a user other than the user
	 * running mpiexec. */

	/* get the path to smpd.exe because pszExe is currently mpiexec.exe */
	smpd_get_smpd_data("binary", smpd_process.pszExe, SMPD_MAX_EXE_LENGTH);

	/* launch the manager process */
	result = smpd_start_win_mgr(context, SMPD_FALSE);
	if (result != SMPD_SUCCESS)
	{
	    smpd_err_printf("unable to start the local smpd manager.\n");
	    goto quit_job;
	}

	/* connect to the manager */
	smpd_dbg_printf("connecting a new socket.\n");
	port = atol(context->port_str);
	if (port < 1)
	{
	    smpd_err_printf("Invalid reconnect port read: %d\n", port);
	    goto quit_job;
	}
	result = smpd_create_context(context->type, context->set, SMPDU_SOCK_INVALID_SOCK, context->id, &rc_context);
	if (result != SMPD_SUCCESS)
	{
	    smpd_err_printf("unable to create a new context for the reconnection.\n");
	    goto quit_job;
	}
	rc_context->state = context->state;
	rc_context->write_state = SMPD_RECONNECTING;
	context->state = SMPD_CLOSING;
	rc_context->connect_to = context->connect_to;
	rc_context->connect_return_id = context->connect_return_id;
	rc_context->connect_return_tag = context->connect_return_tag;
	strcpy(rc_context->host, context->host);
	smpd_process.left_context = rc_context;
	smpd_dbg_printf("posting a re-connect to %s:%d in %s context.\n", rc_context->connect_to->host, port, smpd_get_context_str(rc_context));
	result = SMPDU_Sock_post_connect(rc_context->set, rc_context, rc_context->connect_to->host, port, &rc_context->sock);
	if (result != SMPD_SUCCESS)
	{
	    smpd_err_printf("Unable to post a connect to '%s:%d',\nsock error: %s\n",
		rc_context->connect_to->host, port, get_sock_error_string(result));
	    if (smpd_post_abort_command("Unable to connect to '%s:%d',\nsock error: %s\n",
		rc_context->connect_to->host, port, get_sock_error_string(result)) != SMPD_SUCCESS)
	    {
		goto quit_job;
	    }
	}
    }
    else
    {
#endif
	smpd_process.left_context = context;
	result = SMPDU_Sock_set_user_ptr(sock, context);
	if (result != SMPD_SUCCESS)
	{
	    smpd_err_printf("unable to set the smpd sock user pointer,\nsock error: %s\n",
		get_sock_error_string(result));
	    goto quit_job;
	}
#ifdef HAVE_WINDOWS_H
    }
#endif

#ifdef HAVE_WINDOWS_H
    {
	/* Create a break handler and a socket to handle aborting the job when mpiexec receives break signals */
	smpd_context_t *reader_context;
	SMPDU_Sock_t sock_reader;
	SMPDU_SOCK_NATIVE_FD reader, writer;

	smpd_make_socket_loop((SOCKET*)&reader, (SOCKET*)&writer);
	result = SMPDU_Sock_native_to_sock(set, reader, NULL, &sock_reader);
	result = SMPDU_Sock_native_to_sock(set, writer, NULL, &smpd_process.mpiexec_abort_sock);
	result = smpd_create_context(SMPD_CONTEXT_MPIEXEC_ABORT, set, sock_reader, -1, &reader_context);
	reader_context->read_state = SMPD_READING_MPIEXEC_ABORT;
	result = SMPDU_Sock_post_read(sock_reader, &reader_context->read_cmd.cmd, 1, 1, NULL);

	if (!SetConsoleCtrlHandler(mpiexec_ctrl_handler, TRUE))
	{
	    /* Don't error out; allow the job to run without a ctrl handler? */
	    result = GetLastError();
	    smpd_dbg_printf("unable to set a ctrl handler for mpiexec, error %d\n", result);
	}
    }
#endif

    result = smpd_enter_at_state(set, state);
    if (result != SMPD_SUCCESS)
    {
	smpd_err_printf("state machine failed.\n");
	goto quit_job;
    }

quit_job:

    if ((result != SMPD_SUCCESS) && (smpd_process.mpiexec_exit_code == 0))
    {
	smpd_process.mpiexec_exit_code = -1;
    }

    /* finalize */

    smpd_dbg_printf("calling SMPDU_Sock_finalize\n");
    result = SMPDU_Sock_finalize();
    if (result != SMPD_SUCCESS)
    {
	smpd_err_printf("SMPDU_Sock_finalize failed,\nsock error: %s\n", get_sock_error_string(result));
    }

    /* SMPD_Finalize called in smpd_exit()
    smpd_dbg_printf("calling SMPD_Finalize\n");
    result = PMPI_Finalize();
    if (result != SMPD_SUCCESS)
    {
	smpd_err_printf("SMPD_Finalize failed,\nerror: %d\n", result);
    }
    */

#ifdef HAVE_WINDOWS_H
    if (smpd_process.hCloseStdinThreadEvent)
	SetEvent(smpd_process.hCloseStdinThreadEvent);
    if (smpd_process.hStdinThread != NULL)
    {
	/* close stdin so the input thread will exit */
	CloseHandle(GetStdHandle(STD_INPUT_HANDLE));
	if (WaitForSingleObject(smpd_process.hStdinThread, 3000) != WAIT_OBJECT_0)
	{
	    TerminateThread(smpd_process.hStdinThread, 321);
	}
	CloseHandle(smpd_process.hStdinThread);
    }
    if (smpd_process.hCloseStdinThreadEvent)
    {
	CloseHandle(smpd_process.hCloseStdinThreadEvent);
	smpd_process.hCloseStdinThreadEvent = NULL;
    }
#elif defined(USE_PTHREAD_STDIN_REDIRECTION)
    smpd_cancel_stdin_thread();
#endif
    smpd_exit_fn("main");
    /* SMPD_CS_EXIT(); */
    return smpd_exit(smpd_process.mpiexec_exit_code);
}
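
On POSIX systems without a thread package, both mpiexec_rsh and main above fall back to a SIGALRM-based timeout: they install timeout_function as the signal handler and arm alarm() with the configured number of seconds. The sketch below shows that pattern in isolation; the handler here just exits, whereas the real timeout_function aborts the job through the SMPD state machine.

#include <signal.h>
#include <unistd.h>

/* Stand-in for SMPD's timeout_function: async-signal-safe, reports and exits. */
static void timeout_function(int sig)
{
    (void)sig;
    (void)write(STDERR_FILENO, "timeout expired, aborting\n", 26);
    _exit(1);
}

int main(void)
{
    signal(SIGALRM, timeout_function);  /* equivalent of smpd_signal(SIGALRM, ...) */
    alarm(5);                           /* request SIGALRM after 5 seconds */
    pause();                            /* stand-in for the mpiexec event loop */
    return 0;
}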