static
void
l_begin_xfer(
    globus_ftp_client_handle_t *        ftp_handle,
    globus_l_ftp_client_restart_plugin_t *  d)
{
    globus_reltime_t                    period;

    if(!d->stall_timeout)
    {
        return;
    }
    d->ticker = 0;
    d->ticker_ftp_handle = ftp_handle;
    d->xfer_running = GLOBUS_TRUE;
    d->ticker_set = GLOBUS_TRUE;
    
    /* Start a timer with a period of half the stall timeout; we won't
     * restart/abort until the 2nd consecutive callback with no data.
     */
    GlobusTimeReltimeSet(period, d->stall_timeout/2, 0);
    globus_callback_register_periodic(
        &d->ticker_handle,
        &period,
        &period,
        l_ticker_cb,
        d);
}
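
The ticker callback registered above is not part of this snippet. A minimal sketch of what it might look like, assuming the plugin's data callbacks reset d->ticker whenever bytes move (the abort call is the real globus_ftp_client_abort; the stall-accounting details are guesses):

/* Hypothetical sketch: fires every stall_timeout/2 seconds. The data
 * callbacks are assumed to reset d->ticker to 0 on progress, so two
 * consecutive silent periods mean a full stall_timeout with no data.
 */
static
void
l_ticker_cb(
    void *                              user_arg)
{
    globus_l_ftp_client_restart_plugin_t *  d = user_arg;

    if(!d->xfer_running)
    {
        return;
    }
    if(++d->ticker >= 2)
    {
        /* no data for a full stall_timeout: abort the transfer so the
         * restart machinery can kick in */
        globus_ftp_client_abort(d->ticker_ftp_handle);
    }
}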
int
globus_i_xio_win32_complete_activate(void)
{
    globus_result_t                     result;
    globus_reltime_t                    period;
    GlobusXIOName(globus_i_xio_win32_complete_activate);
    
    GlobusXIOSystemDebugEnter();
    
    if (!globus_i_am_only_thread())
    {
        goto skip_activate;
    }
    GlobusLWin32PollQueueInit();
    win32_mutex_init(&globus_l_xio_win32_poll_lock, 0);
    globus_l_xio_win32_poll_event_sleeping = GLOBUS_FALSE;
    globus_l_xio_win32_poll_event_pending = GLOBUS_FALSE;
    globus_l_xio_win32_poll_free = 0;
    
    globus_l_xio_win32_poll_event = CreateEvent(0, FALSE, FALSE, 0);
    if(globus_l_xio_win32_poll_event == 0)
    {
        goto error_event;
    }
    
    GlobusTimeReltimeSet(period, 0, 0);
    result = globus_callback_register_periodic(
        &globus_l_xio_win32_poll_handle,
        0,
        &period,
        globus_l_xio_win32_poll,
        0);
    if(result != GLOBUS_SUCCESS)
    {
        goto error_periodic;
    }
    
    globus_callback_add_wakeup_handler(
        globus_l_xio_win32_wakeup_handler, 0);
    
skip_activate:
    GlobusXIOSystemDebugExit();
    
    return GLOBUS_SUCCESS;
    
error_periodic:
    CloseHandle(globus_l_xio_win32_poll_event);
    globus_l_xio_win32_poll_event = 0;
error_event:
    win32_mutex_destroy(&globus_l_xio_win32_poll_lock);
    GlobusXIOSystemDebugExitWithError();
    return GLOBUS_FAILURE;
}
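
The wakeup handler registered above is likewise not shown. A plausible sketch, reusing the lock, event, and flags initialized in the activate function; the exact guard logic is an assumption:

/* Plausible sketch (not from this listing): wake the poll callback if
 * it is blocked on the event and no wakeup is already pending. The
 * flag handling is inferred from the state initialized in activate.
 */
static
void
globus_l_xio_win32_wakeup_handler(
    void *                              user_arg)
{
    win32_mutex_lock(&globus_l_xio_win32_poll_lock);
    {
        if(globus_l_xio_win32_poll_event_sleeping &&
            !globus_l_xio_win32_poll_event_pending)
        {
            SetEvent(globus_l_xio_win32_poll_event);
            globus_l_xio_win32_poll_event_pending = GLOBUS_TRUE;
        }
    }
    win32_mutex_unlock(&globus_l_xio_win32_poll_lock);
}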
int
main(
    int                                 argc,
    char **                             argv)
{
    int                                 rc;
    globus_gram_job_manager_config_t    config;
    globus_gram_job_manager_t           manager;
    char *                              sleeptime_str;
    long                                sleeptime = 0;
    globus_bool_t                       debug_mode_service = GLOBUS_FALSE;
    globus_bool_t                       located_active_jm = GLOBUS_FALSE;
    int                                 http_body_fd = -1;
    int                                 context_fd = -1;
    gss_cred_id_t                       cred = GSS_C_NO_CREDENTIAL;
    OM_uint32                           major_status, minor_status;
    pid_t                               forked_starter = 0;
    globus_bool_t                       cgi_invoked = GLOBUS_FALSE;
    int                                 lock_tries_left = 10;

    if ((sleeptime_str = getenv("GLOBUS_JOB_MANAGER_SLEEP")))
    {
        sleeptime = atoi(sleeptime_str);
        sleep(sleeptime);
    }
    if (getenv("GATEWAY_INTERFACE"))
    {
        cgi_invoked = GLOBUS_TRUE;
    }
    /*
     * Stdin and stdout point at the socket to the client, so make sure
     * there is no buffering. Stderr may too, depending on the options in
     * the grid-services entry.
     */
    setbuf(stdout, NULL);
    /* Don't export these to the perl scripts */
    fcntl(STDIN_FILENO, F_SETFD, (int) 1);
    fcntl(STDOUT_FILENO, F_SETFD, (int) 1);
    fcntl(STDERR_FILENO, F_SETFD, (int) 1);

    /*
     * Ensure at least a minimal POSIX PATH in the job environment via
     * extra environment values
     */
    if(getenv("PATH") == NULL)
    {
        char * path;
        char default_path[] = "/usr/bin:/bin";
        size_t pathlen;

        pathlen = confstr(_CS_PATH, NULL, (size_t) 0);

        if (pathlen < sizeof(default_path))
        {
            pathlen = sizeof(default_path);
        }
        path = malloc(pathlen);
        if (path == NULL)
        {
            fprintf(stderr, "Error allocating PATH buffer\n");
            exit(1);
        }
        path[0] = 0;

        (void) confstr(_CS_PATH, path, pathlen);
        if (path[0] == 0)
        {
            strncpy(path, default_path, pathlen);
        }
        setenv("PATH", path, 1);
    }

    /* Force non-threaded execution for now */
    globus_thread_set_model(GLOBUS_THREAD_MODEL_NONE);

    /* Activate the common module before parsing the command line so that
     * logging and option handling work. Note that we can't activate
     * everything yet because we might set GLOBUS_TCP_PORT_RANGE after
     * parsing command-line args, and we need that set before activating XIO.
     */
    rc = globus_module_activate(GLOBUS_COMMON_MODULE);
    if (rc != GLOBUS_SUCCESS)
    {
        fprintf(stderr, "Error activating GLOBUS_COMMON_MODULE\n");
        exit(1);
    }

    /* Parse command line options to get jobmanager configuration */
    rc = globus_gram_job_manager_config_init(&config, argc, argv);
    if (rc != GLOBUS_SUCCESS)
    {
        reply_and_exit(NULL, rc, NULL);
    }

    globus_thread_key_create(
            &globus_i_gram_request_key,
            NULL);

    rc = globus_gram_job_manager_logging_init(&config);
    if (rc != GLOBUS_SUCCESS)
    {
        exit(1);
    }
    if (getenv("GRID_SECURITY_HTTP_BODY_FD") == NULL && !cgi_invoked)
    {
        debug_mode_service = GLOBUS_TRUE;
    }
    /* Set environment variables from configuration */
    if(config.globus_location != NULL)
    {
        globus_libc_setenv("GLOBUS_LOCATION",
                           config.globus_location,
                           GLOBUS_TRUE);
    }
    if(config.tcp_port_range != NULL)
    {
        globus_libc_setenv("GLOBUS_TCP_PORT_RANGE",
                           config.tcp_port_range,
                           GLOBUS_TRUE);
    }
    if(config.tcp_source_range != NULL)
    {
        globus_libc_setenv("GLOBUS_TCP_SOURCE_RANGE",
                           config.tcp_source_range,
                           GLOBUS_TRUE);
    }

    /* Activate all of the modules we will be using */
    rc = globus_l_gram_job_manager_activate();
    if(rc != GLOBUS_SUCCESS)
    {
        exit(1);
    }

    /*
     * Get the delegated credential (or the default credential if we are
     * run without a client). We don't care about errors in the latter case.
     */
    major_status = globus_gss_assist_acquire_cred(
            &minor_status,
            GSS_C_BOTH,
            &cred);
    if ((!debug_mode_service) && GSS_ERROR(major_status))
    {
        globus_gss_assist_display_status(
                stderr,
                "Error acquiring security credential\n",
                major_status,
                minor_status,
                0);
        exit(1);
    }

    if (cred != GSS_C_NO_CREDENTIAL)
    {
        unsigned long hash;
        char * newtag;

        rc = globus_gram_gsi_get_dn_hash(
                cred,
                &hash);
        if (rc == GLOBUS_SUCCESS)
        {
            newtag = globus_common_create_string("%s%s%lx",
                    strcmp(config.service_tag, "untagged") == 0
                            ? "" : config.service_tag,
                    strcmp(config.service_tag, "untagged") == 0
                            ? "" : ".",
                    hash);
            free(config.service_tag);
            config.service_tag = newtag;
        }
    }

    /*
     * Remove delegated proxy from disk.
     */
    if ((!debug_mode_service) && getenv("X509_USER_PROXY") != NULL)
    {
        remove(getenv("X509_USER_PROXY"));
        unsetenv("X509_USER_PROXY");
    }

    /* Set up LRM-specific state based on our configuration. This will create
     * the job contact listener, start the SEG if needed, and open the log
     * file if needed.
     */
    rc = globus_gram_job_manager_init(&manager, cred, &config);
    if(rc != GLOBUS_SUCCESS)
    {
        reply_and_exit(NULL, rc, manager.gt3_failure_message);
    }

    /*
     * Pull out file descriptor numbers for security context and job request
     * from the environment (set by the gatekeeper)
     */
    if (cgi_invoked)
    {
        http_body_fd = 0;
        context_fd = -1;
    }
    else if (!debug_mode_service)
    {
        char * fd_env = getenv("GRID_SECURITY_HTTP_BODY_FD");

        rc = sscanf(fd_env ? fd_env : "-1", "%d", &http_body_fd);
        if (rc != 1 || http_body_fd < 0)
        {
            fprintf(stderr, "Error locating http body fd\n");
            exit(1);
        }
        fcntl(http_body_fd, F_SETFD, 1);

        fd_env = getenv("GRID_SECURITY_CONTEXT_FD");
        rc = sscanf(fd_env ? fd_env : "-1", "%d", &context_fd);
        if (rc != 1 || context_fd < 0)
        {
            fprintf(stderr, "Error locating security context fd\n");
            exit(1);
        }
        fcntl(context_fd, F_SETFD, 1);
    }


    /* Redirect stdin from /dev/null, we'll handle stdout after the reply is
     * sent
     */
    if (!cgi_invoked)
    {
        freopen("/dev/null", "r", stdin);
    }

    /* Here we'll either become the active job manager to process all
     * jobs for this user/host/lrm combination, or we'll hand off the
     * file descriptors containing the info to the active job manager
     */
    while (!located_active_jm)
    {
        /* We'll try to get the lock file associated with being the
         * active job manager here. If we get the OLD_JM_ALIVE error,
         * somebody else has it
         */
        rc = globus_gram_job_manager_startup_lock(
                &manager,
                &manager.lock_fd);
        if (rc == GLOBUS_SUCCESS)
        {
            /* We've acquired the lock. We will fork a new process to act like
             * all other job managers which don't have the lock, and continue
             * on in this process managing jobs for this LRM.  Note that the
             * child process does not inherit the lock
             */
            if (!debug_mode_service)
            {
                int save_errno = 0;

                /* We've acquired the manager lock */
                forked_starter = fork();
                save_errno = errno;

                if (forked_starter < 0)
                {
                    if (sleeptime != 0)
                    {
                        sleep(sleeptime);
                    }

                    fprintf(stderr, "fork failed: %s", strerror(save_errno));
                    exit(1);
                }
                else if (forked_starter == 0)
                {
                    /* We are the child process. We'll close our reference to
                     * the lock and let the other process deal with jobs
                     */
                    close(manager.lock_fd);
                    manager.lock_fd = -1;
                }
                globus_logging_update_pid();
                if (sleeptime != 0)
                {
                    sleep(sleeptime);
                }

            }

            if (manager.lock_fd >= 0)
            {
                /* We hold the manager lock, so we'll store our credential and
                 * then try to accept socket connections. If the socket
                 * connections fail, we'll exit, and another process
                 * will be forked to handle them.
                 */
                rc = globus_gram_job_manager_gsi_write_credential(
                        NULL,
                        cred,
                        manager.cred_path);

                if (rc != GLOBUS_SUCCESS)
                {
                    fprintf(stderr, "write cred failed\n");
                    exit(1);
                }
                if (!debug_mode_service)
                {
                    close(http_body_fd);
                    http_body_fd = -1;
                }

                rc = globus_gram_job_manager_startup_socket_init(
                        &manager,
                        &manager.active_job_manager_handle,
                        &manager.socket_fd);
                if (rc != GLOBUS_SUCCESS)
                {
                    /* This releases our lock. Either the child process will
                     * attempt to acquire the lock again or some other job
                     * manager will acquire the lock
                     */
                    exit(0);
                }
                assert(manager.socket_fd != -1);
            }
        }
        else if (rc != GLOBUS_GRAM_PROTOCOL_ERROR_OLD_JM_ALIVE)
        {
            /* Some system error. Try again */
            if (--lock_tries_left == 0)
            {
                reply_and_exit(NULL, rc, "Unable to create lock file");
            }
            sleep(1);
            continue;
        }

        /* If manager.socket_fd != -1 then we are the main job manager for this
         * LRM.
         * We will restart all existing jobs and then allow the startup
         * socket to accept new jobs from other job managers.
         */
        if (manager.socket_fd != -1)
        {
            /* Look up cputype/manufacturer if not known yet */
            globus_l_gram_cputype_and_manufacturer(manager.config);

            GlobusTimeAbstimeGetCurrent(manager.usagetracker->jm_start_time);            
            globus_i_gram_usage_stats_init(&manager);
            globus_i_gram_usage_start_session_stats(&manager);

            located_active_jm = GLOBUS_TRUE;

            /* Load existing jobs. The show must go on if this fails, unless it
             * fails with a misconfiguration error
             */
            rc = globus_gram_job_manager_request_load_all(
                    &manager);
            if (rc == GLOBUS_GRAM_PROTOCOL_ERROR_GATEKEEPER_MISCONFIGURED)
            {
                if (forked_starter > 0)
                {
                    kill(forked_starter, SIGTERM);
                    forked_starter = 0;
                }
                reply_and_exit(NULL, rc, manager.gt3_failure_message);
            }
            if (context_fd != -1)
            {
                close(context_fd);
                context_fd = -1;
            }
            freopen("/dev/null", "a", stdout);

            /* At this point, seg_last_timestamp is the earliest last timestamp 
             * for any pre-existing jobs. If that is 0, then we don't have any
             * existing jobs so we'll just ignore seg events prior to now.
             */
            if (manager.seg_last_timestamp == 0)
            {
                manager.seg_last_timestamp = time(NULL);
            }

            /* Start off the SEG if we need it.
             */
            if (config.seg_module != NULL || 
                strcmp(config.jobmanager_type, "fork") == 0 ||
                strcmp(config.jobmanager_type, "condor") == 0)
            {
                rc = globus_gram_job_manager_init_seg(&manager);

                /* TODO: If SEG load fails and load_all added some to the 
                 * job_id hash, they will need to be pushed into the state
                 * machine so that polling fallback can happen.
                 */
                if (rc != GLOBUS_SUCCESS)
                {
                    config.seg_module = NULL;
                }
            }
            /* GRAM-128:
             * Register a periodic event to process the GRAM jobs that were
             * reloaded from their job state files at job manager start time.
             * This will acquire and then release a reference to each job,
             * which, behind the scenes, will kick off the state machine
             * for that job if needed.
             */
            if (!globus_list_empty(manager.pending_restarts))
            {
                globus_reltime_t        restart_period;

                GlobusTimeReltimeSet(restart_period, 1, 0);

                rc = globus_callback_register_periodic(
                        &manager.pending_restart_handle,
                        NULL,
                        &restart_period,
                        globus_l_gram_process_pending_restarts,
                        &manager);
            }

            {
                globus_reltime_t        expire_period;

                GlobusTimeReltimeSet(expire_period, 1, 0);

                rc = globus_callback_register_periodic(
                    &manager.expiration_handle,
                    NULL,
                    &expire_period,
                    globus_gram_job_manager_expire_old_jobs,
                    &manager);
            }

            {
                globus_reltime_t        lockcheck_period;

                GlobusTimeReltimeSet(lockcheck_period, 60, 0);

                rc = globus_callback_register_periodic(
                    &manager.lockcheck_handle,
                    NULL,
                    &lockcheck_period,
                    globus_l_gram_lockcheck,
                    &manager);
            }

            {
                globus_reltime_t        idlescript_period;

                GlobusTimeReltimeSet(idlescript_period, 60, 0);

                rc = globus_callback_register_periodic(
                    &manager.idle_script_handle,
                    NULL,
                    &idlescript_period,
                    globus_gram_script_close_idle,
                    &manager);
            }
        }
        else if (http_body_fd >= 0)
        {
            /* If manager.socket_fd == -1 then we are either the child from the
             * fork or another process started somehow (either command-line
             * invocation or via a job submit). If we have a client, then we'll
             * send our fds to the job manager with the lock and let it process
             * the job.
             *
             * If this succeeds, we set located_active_jm and leave the loop.
             * Otherwise, we try again.
             */
            if (context_fd >= 0)
            {
                rc = globus_gram_job_manager_starter_send(
                        &manager,
                        http_body_fd,
                        context_fd,
                        fileno(stdout),
                        cred);
            }
            else
            {
                rc = globus_gram_job_manager_starter_send_v2(
                        &manager,
                        cred);
            }
            if (rc == GLOBUS_SUCCESS)
            {
                located_active_jm = GLOBUS_TRUE;
                close(http_body_fd);
                if (context_fd >= 0)
                {
                    close(context_fd);
                }
                manager.done = GLOBUS_TRUE;
            }
            else
            {
                globus_libc_usleep(250000);
            }
        }
        else
        {
            /* We were started by hand, but another process is currently the
             * main job manager
             */
            unsigned long realpid = 0;
            FILE * pidin = fopen(manager.pid_path, "r");

            if (pidin != NULL)
            {
                fscanf(pidin, "%lu", &realpid);
                fclose(pidin);
            }

            fprintf(stderr, "Other job manager process with pid %lu running and processing jobs\n",
                    realpid);

            exit(0);
        }
    }

    /* Ignore SIGCHLD, and automatically reap child processes. Because of the
     * fork() above to delegate to another job manager process, and the use of
     * sub-processes to invoke the perl modules, we create some other
     * processes. We don't care too much how they exit, so we'll just make sure
     * we don't create zombies out of them.
     */
    {
        struct sigaction act;

        act.sa_handler = SIG_IGN;
        sigemptyset(&act.sa_mask);
        sigaddset(&act.sa_mask, SIGCHLD);
#ifdef SA_NOCLDWAIT
        act.sa_flags = SA_NOCLDWAIT;
#else
        /* This may leave zombies running on non-POSIX systems like Hurd */
        act.sa_flags = 0;
#endif
        sigaction(SIGCHLD, &act, NULL);
    }

    /* Enable log rotation via SIGUSR1 */
    {
        struct sigaction act;
        act.sa_handler = globus_i_job_manager_log_rotate;
        sigemptyset(&act.sa_mask);
        sigaddset(&act.sa_mask, SIGUSR1);
        act.sa_flags = 0;
        sigaction(SIGUSR1, &act, NULL);
    }
    
    GlobusGramJobManagerLock(&manager);
    if (manager.socket_fd != -1 &&
        globus_hashtable_empty(&manager.request_hash) &&
        manager.grace_period_timer == GLOBUS_NULL_HANDLE)
    {
        globus_gram_job_manager_set_grace_period_timer(&manager);
    }


    /* For the active job manager, this will block until all jobs have
     * terminated. For any other job manager, manager.done is already set to
     * GLOBUS_TRUE and this falls right through.
     */
    while (! manager.done)
    {
        GlobusGramJobManagerWait(&manager);
    }
    if (manager.expiration_handle != GLOBUS_NULL_HANDLE)
    {
        globus_callback_unregister(manager.expiration_handle, NULL, NULL, NULL);
    }
    if (manager.lockcheck_handle != GLOBUS_NULL_HANDLE)
    {
        globus_callback_unregister(manager.lockcheck_handle, NULL, NULL, NULL);
    }
    if (manager.idle_script_handle != GLOBUS_NULL_HANDLE)
    {
        globus_callback_unregister(manager.idle_script_handle, NULL, NULL, NULL);
    }
    GlobusGramJobManagerUnlock(&manager);

    globus_gram_job_manager_log(
            &manager,
            GLOBUS_GRAM_JOB_MANAGER_LOG_DEBUG,
            "event=gram.end "
            "level=DEBUG "
            "\n");

    /* Clean-up to do if we are the active job manager only */
    if (manager.socket_fd != -1)
    {
        globus_gram_job_manager_script_close_all(&manager);
        globus_i_gram_usage_end_session_stats(&manager);
        globus_i_gram_usage_stats_destroy(&manager);
        remove(manager.pid_path);
        remove(manager.cred_path);
        remove(manager.socket_path);
        remove(manager.lock_path);
    }
    globus_gram_job_manager_logging_destroy();
    globus_gram_job_manager_destroy(&manager);
    globus_gram_job_manager_config_destroy(&config);

    rc = globus_l_gram_deactivate();
    if (rc != GLOBUS_SUCCESS)
    {
        fprintf(stderr, "deactivation failed with rc=%d\n",
                rc);
        exit(1);
    }

/*
    {
        const char * gk_jm_id_var = "GATEKEEPER_JM_ID";
        const char * gk_jm_id = globus_libc_getenv(gk_jm_id_var);

        globus_gram_job_manager_request_acct(
                request,
                "%s %s JM exiting\n",
                gk_jm_id_var, gk_jm_id ? gk_jm_id : "none");
    }
*/


    return(0);
}
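
The globus_l_gram_process_pending_restarts callback registered in the GRAM-128 block inside main() above is not part of this listing. A hedged sketch of the acquire-and-release pattern that comment describes, with the reference-helper signatures approximated and the real code's locking and batching elided:

/* Hedged sketch (not the listing's implementation): drain the pending
 * restart list, touching each job once. Helper signatures are
 * approximated; the real callback batches work and holds the manager
 * lock while walking the list.
 */
static
void
globus_l_gram_process_pending_restarts(
    void *                              arg)
{
    globus_gram_job_manager_t *         manager = arg;
    globus_gram_jobmanager_request_t *  request;
    char *                              key;

    while (!globus_list_empty(manager->pending_restarts))
    {
        key = globus_list_remove(
                &manager->pending_restarts,
                manager->pending_restarts);

        /* acquiring then releasing a reference kicks the job's state
         * machine if it needs to run */
        if (globus_gram_job_manager_add_reference(
                manager, key, "restart", &request) == GLOBUS_SUCCESS)
        {
            globus_gram_job_manager_remove_reference(
                    manager, key, "restart");
        }
    }
}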
int
main(
    int                                 argc,
    char **                             argv)
{
    int                                 opt;
    globus_gram_streamer_monitor_t      monitor;
    int                                 rc;
    char                                local_path[16];
    globus_result_t                     result;
    globus_reltime_t                    period;
    globus_module_descriptor_t *        modules[] =
    {
        GLOBUS_COMMON_MODULE,
        GLOBUS_GASS_TRANSFER_MODULE,
        NULL
    };
    globus_module_descriptor_t *        failed_module;

    memset(&monitor, 0, sizeof(globus_gram_streamer_monitor_t));
    globus_mutex_init(&monitor.request.mutex, NULL);
    globus_cond_init(&monitor.request.cond, NULL);

    while ((opt = getopt(argc, argv, "s:p:d:h")) != -1)
    {
        switch (opt)
        {
            case 's':
                monitor.request.job_state_file = optarg;
                /*
                 * Assume that the remote I/O file will not be newer than the
                 * current time
                 */
                monitor.remote_io_url_file_time = time(NULL);
                rc = globus_gram_job_manager_state_file_read(&monitor.request);
                if (rc != GLOBUS_SUCCESS)
                {
                    fprintf(stderr, "%d:Error reading state file %s\n",
                            rc, optarg);
                }
                break;

            case 'p':
                if ((monitor.pid_count+1) == STREAMER_MAX)
                {
                    fprintf(stderr, "%d:Too many pids for streamer\n",
                            GLOBUS_GRAM_PROTOCOL_ERROR_NO_RESOURCES);
                    exit(EXIT_FAILURE);
                }
                monitor.pids[monitor.pid_count++] =
                        (pid_t) strtol(optarg, NULL, 10);
                break;

            case 'd':
                rc = chdir(optarg);
                if (rc != 0)
                {
                    int save_errno = errno;
                    fprintf(stderr,
                            "%d:Error accessing job state directory: %s (%d)\n",
                            GLOBUS_GRAM_PROTOCOL_ERROR_BAD_DIRECTORY,
                            strerror(save_errno),
                            save_errno);
                    exit(EXIT_FAILURE);
                }
                break;
            case 'h':
                printf("Usage: %s -s STATE-FILE -p pid [-p pid]...\n", argv[0]);
                exit(EXIT_SUCCESS);
                break;

            case '?':
            default:
                fprintf(stderr, "%d:Unknown option: %c\n",
                        GLOBUS_GRAM_PROTOCOL_ERROR_GATEKEEPER_MISCONFIGURED, 
                        (char) opt);
                exit(EXIT_FAILURE);
        }
    }

    rc = globus_module_activate_array(modules, &failed_module);
    if (rc != GLOBUS_SUCCESS)
    {
        fprintf(stderr, "%d:Activation failed: %s %d\n",
                GLOBUS_GRAM_PROTOCOL_ERROR_GATEKEEPER_MISCONFIGURED,
                failed_module->module_name,
                rc);
        exit(EXIT_FAILURE);
    }

    strcpy(local_path, "stdout");
    monitor.output_stream.fd = open(local_path, O_RDONLY);

    strcpy(local_path, "stderr");
    monitor.error_stream.fd = open(local_path, O_RDONLY);

    rc = globus_mutex_init(&monitor.mutex, NULL);
    if (rc != GLOBUS_SUCCESS)
    {
        fprintf(stderr, "%d:Mutex init failed\n",
                GLOBUS_GRAM_PROTOCOL_ERROR_MALLOC_FAILED);
        exit(EXIT_FAILURE);
    }
    rc = globus_cond_init(&monitor.cond, NULL);
    if (rc != GLOBUS_SUCCESS)
    {
        fprintf(stderr, "%d:Mutex init failed\n",
                GLOBUS_GRAM_PROTOCOL_ERROR_MALLOC_FAILED);
        exit(EXIT_FAILURE);
    }

    globus_mutex_lock(&monitor.mutex);

    GlobusTimeReltimeSet(period, 5, 0);
    result = globus_callback_register_periodic(
            &monitor.local_poll_periodic,
            &globus_i_reltime_zero,
            &period,
            globus_l_gram_streamer_local_poll,
            &monitor);
    if (result != GLOBUS_SUCCESS)
    {
        char * errstr = globus_error_print_friendly(globus_error_peek(result));
        fprintf(stderr, "%d:Initialization error: %s\n",
                GLOBUS_GRAM_PROTOCOL_ERROR_MALLOC_FAILED,
                errstr);
        free(errstr);
        exit(EXIT_FAILURE);
    }

    result = globus_callback_register_periodic(
            &monitor.waitpids_poll_periodic,
            &globus_i_reltime_zero,
            &period,
            globus_l_gram_streamer_waitpids,
            &monitor);
    if (result != GLOBUS_SUCCESS)
    {
        char * errstr = globus_error_print_friendly(globus_error_peek(result));
        fprintf(stderr, "%d:Initialization error: %s\n",
                GLOBUS_GRAM_PROTOCOL_ERROR_MALLOC_FAILED,
                errstr);
        free(errstr);
        exit(EXIT_FAILURE);
    }

    rc = globus_l_gram_streamer_get_destinations(
            &monitor);
    if (rc != GLOBUS_SUCCESS)
    {
        exit(EXIT_FAILURE);
    }

    if (monitor.output_stream.fd != -1 &&
        monitor.output_stream.destination != NULL)
    {
        rc = globus_l_gram_streamer_open_destination(
                &monitor,
                &monitor.output_stream);
        if (rc != GLOBUS_SUCCESS)
        {
            fprintf(stderr, "%d:Error opening stdout destination %s (%d)\n",
                    GLOBUS_GRAM_PROTOCOL_ERROR_OPENING_STDOUT,
                    monitor.output_stream.destination,
                    rc);
            exit(EXIT_FAILURE);
        }
        monitor.output_stream.state = GLOBUS_GRAM_STREAM_NEW;
    }
    else
    {
        monitor.output_stream.state = GLOBUS_GRAM_STREAM_NONE;
    }
    if (monitor.error_stream.fd != -1 &&
        monitor.error_stream.destination != NULL)
    {
        rc = globus_l_gram_streamer_open_destination(
                &monitor,
                &monitor.error_stream);
        if (rc != GLOBUS_SUCCESS)
        {
            fprintf(stderr, "%d:Error opening stderr destination %s (%d)\n",
                    GLOBUS_GRAM_PROTOCOL_ERROR_OPENING_STDERR,
                    monitor.error_stream.destination,
                    rc);
            exit(EXIT_FAILURE);
        }
        monitor.error_stream.state = GLOBUS_GRAM_STREAM_NEW;
    }
    else
    {
        monitor.error_stream.state = GLOBUS_GRAM_STREAM_NONE;
    }

    while (monitor.pid_count > 0 ||
           (monitor.output_stream.state != GLOBUS_GRAM_STREAM_NONE &&
            monitor.output_stream.state != GLOBUS_GRAM_STREAM_DONE &&
            monitor.output_stream.state != GLOBUS_GRAM_STREAM_FAIL) ||
           (monitor.error_stream.state != GLOBUS_GRAM_STREAM_NONE &&
            monitor.error_stream.state != GLOBUS_GRAM_STREAM_DONE &&
            monitor.error_stream.state != GLOBUS_GRAM_STREAM_FAIL))
    {
        globus_cond_wait(&monitor.cond, &monitor.mutex);
    }
    if (monitor.output_stream.state == GLOBUS_GRAM_STREAM_DONE)
    {
        printf("%s %s\n",
               monitor.output_stream.source,
               monitor.output_stream.destination);
    }
    if (monitor.error_stream.state == GLOBUS_GRAM_STREAM_DONE)
    {
        printf("%s %s\n",
               monitor.error_stream.source,
               monitor.error_stream.destination);
    }
    globus_mutex_unlock(&monitor.mutex);
    globus_module_deactivate(GLOBUS_GASS_TRANSFER_MODULE);
    globus_module_deactivate(GLOBUS_COMMON_MODULE);

    exit(EXIT_SUCCESS);
}
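
Neither of the streamer's two periodic callbacks is shown on this page. As an illustration, a hypothetical sketch of globus_l_gram_streamer_waitpids that reaps the monitored pids with waitpid(..., WNOHANG) and signals the condition the main loop waits on; the monitor fields come from the listing, but the body is guesswork:

#include <sys/wait.h>

/* Hypothetical sketch: reap any monitored pid that has exited and wake
 * the main loop. monitor->pids / monitor->pid_count are used exactly
 * as in the option parsing above; the reaping strategy is an
 * assumption.
 */
static
void
globus_l_gram_streamer_waitpids(
    void *                              arg)
{
    globus_gram_streamer_monitor_t *    monitor = arg;
    int                                 i;

    globus_mutex_lock(&monitor->mutex);
    for (i = 0; i < monitor->pid_count; i++)
    {
        if (waitpid(monitor->pids[i], NULL, WNOHANG) > 0)
        {
            /* replace the reaped entry with the last one, then
             * re-examine this slot */
            monitor->pids[i--] = monitor->pids[--monitor->pid_count];
        }
    }
    globus_cond_signal(&monitor->cond);
    globus_mutex_unlock(&monitor->mutex);
}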
void
globus_i_gsc_event_start_perf_restart(
    globus_i_gsc_op_t *                 op)
{
    globus_result_t                     res;
    globus_reltime_t                    delay;
    globus_i_gsc_event_data_t *         event;

    event = &op->event;

    if(op->type != GLOBUS_L_GSC_OP_TYPE_RECV && op->type != GLOBUS_L_GSC_OP_TYPE_SEND)
    {
        return;
    }

    /* performance markers */
    if(((op->type == GLOBUS_L_GSC_OP_TYPE_RECV && 
            op->server_handle->opts.perf_frequency > 0) || 
        (op->type == GLOBUS_L_GSC_OP_TYPE_SEND && 
            op->server_handle->opts.retr_perf_frequency > 0)) &&
        event->event_mask & GLOBUS_GRIDFTP_SERVER_CONTROL_EVENT_PERF)
    {
        event->stripe_count = op->server_handle->stripe_count;
        event->stripe_total = (globus_off_t *)globus_calloc(
            sizeof(globus_off_t) * event->stripe_count, 1);

        /* register periodic for events */
        if(op->type == GLOBUS_L_GSC_OP_TYPE_SEND)
        {
            GlobusTimeReltimeSet(
                delay, op->server_handle->opts.retr_perf_frequency, 0);
        }
        else
        {
            GlobusTimeReltimeSet(
                delay, op->server_handle->opts.perf_frequency, 0);
        }
        event->perf_running = GLOBUS_TRUE;
        res = globus_callback_register_periodic(
            &event->periodic_handle,
            &delay,
            &delay,
            globus_l_gsc_send_perf_marker_cb,
            op);
        if(res != GLOBUS_SUCCESS)
        {
            globus_panic(&globus_i_gsc_module, res,
                "periodic registration failed.");
        }
    }

    /* restart markers */
    if(op->server_handle->opts.restart_frequency > 0 &&
        event->event_mask & GLOBUS_GRIDFTP_SERVER_CONTROL_EVENT_RESTART &&
        op->type == GLOBUS_L_GSC_OP_TYPE_RECV)
    {
        GlobusTimeReltimeSet(
            delay, op->server_handle->opts.restart_frequency, 0);
        event->restart_running = GLOBUS_TRUE;
        res = globus_callback_register_periodic(
            &event->restart_handle,
            &delay,
            &delay,
            globus_l_gsc_send_restart_marker_cb,
            op);
        if(res != GLOBUS_SUCCESS)
        {
            globus_panic(&globus_i_gsc_module, res,
                "periodic registration failed.");
        }
    }
}
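
All of these snippets revolve around the same globus_callback_register_periodic pattern. A minimal self-contained sketch of that pattern outside any of the code above, using the non-threaded model so globus_poll() drives the callbacks; tick_cb and count are illustrative names, not part of any listing:

#include "globus_common.h"

/* illustrative callback: just count invocations */
static
void
tick_cb(
    void *                              user_arg)
{
    int *                               count = user_arg;

    (*count)++;
}

int
main(void)
{
    globus_callback_handle_t            handle;
    globus_reltime_t                    period;
    int                                 count = 0;

    globus_module_activate(GLOBUS_COMMON_MODULE);

    GlobusTimeReltimeSet(period, 0, 100000);    /* 100 ms */
    globus_callback_register_periodic(
            &handle,
            &period,            /* initial delay */
            &period,            /* repeat period */
            tick_cb,
            &count);

    while (count < 3)
    {
        globus_poll();          /* non-threaded model: callbacks fire here */
    }

    globus_callback_unregister(handle, NULL, NULL, NULL);
    globus_module_deactivate(GLOBUS_COMMON_MODULE);
    return 0;
}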