Code example #1
File: condor_auth_x509.cpp  Project: emaste/htcondor
Condor_Auth_X509 :: Condor_Auth_X509(ReliSock * sock)
    : Condor_Auth_Base (sock, CAUTH_GSI),
      credential_handle(GSS_C_NO_CREDENTIAL),
      context_handle   (GSS_C_NO_CONTEXT),
      m_gss_server_name(NULL),
      token_status     (0),
      ret_flags        (0)
{
#ifdef WIN32
	ParseMapFile();
#endif
	if ( !m_globusActivated ) {
		// The Globus callout module is a system-wide setting.  There are several
		// cases where a user may not want it to apply to Condor by default
		// (for example, if it causes crashes when mixed with Condor libs!).
		// Setting GSI_AUTHZ_CONF=/dev/null works for disabling the callouts.
		std::string gsi_authz_conf;
		if (param(gsi_authz_conf, "GSI_AUTHZ_CONF")) {
			if (globus_libc_setenv("GSI_AUTHZ_CONF", gsi_authz_conf.c_str(), 1)) {
				dprintf(D_ALWAYS, "Failed to set the GSI_AUTHZ_CONF environment variable.\n");
				EXCEPT("Failed to set the GSI_AUTHZ_CONF environment variable.\n");
			}
		}
		// In 99% of cases, this is a no-op because the Globus threading model defaults
		// to "none".  However, this can be overridden by a user's environment variable
		// and I'd prefer to take no chances.  This call can fail if a globus module
		// has already been activated (i.e., in the GAHP).  As the defaults are OK,
		// the logging is done at FULLDEBUG, not ALWAYS.
		if (globus_thread_set_model( GLOBUS_THREAD_MODEL_NONE ) != GLOBUS_SUCCESS) {
			dprintf(D_FULLDEBUG, "Unable to explicitly turn off Globus threading."
				"  Will proceed with the default.\n");
		}
		globus_module_activate( GLOBUS_GSI_GSSAPI_MODULE );
		globus_module_activate( GLOBUS_GSI_GSS_ASSIST_MODULE );
		m_globusActivated = true;
	}
}
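
The constructor above activates the Globus GSI modules once per process and can point GSI_AUTHZ_CONF at a harmless file to disable system-wide authorization callouts. A minimal standalone sketch of that pattern (not taken from either project; the header names and the /dev/null value are assumptions) might look like:

#include "globus_common.h"   /* globus_module_activate()/deactivate(), globus_libc_setenv() */
#include "gssapi.h"          /* GLOBUS_GSI_GSSAPI_MODULE (Globus GSI flavor of the header) */

/* Sketch only: disable GSI authorization callouts for this process, then
 * activate the GSI GSSAPI module. Module activation is reference counted,
 * so every successful activate should be balanced by a deactivate. */
static int
activate_gsi_without_callouts(void)
{
    /* Assumption (from the comment in the constructor above): pointing
     * GSI_AUTHZ_CONF at /dev/null disables the callouts. */
    if (globus_libc_setenv("GSI_AUTHZ_CONF", "/dev/null", 1) != 0)
    {
        return -1;
    }
    if (globus_module_activate(GLOBUS_GSI_GSSAPI_MODULE) != GLOBUS_SUCCESS)
    {
        return -1;
    }

    /* ... perform GSS-API calls here ... */

    globus_module_deactivate(GLOBUS_GSI_GSSAPI_MODULE);
    return 0;
}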
Code example #2
File: module.c  Project: eunsungc/gt6-RAMSES_8_5
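/* Parse gsi.conf-style configuration data: KEY=VALUE lines with '#'
 * comments, where KEY is one of the GLOBUS_GSSAPI_* names with the
 * GLOBUS_GSSAPI_ prefix stripped. Matching values are exported through
 * globus_module_setenv() (and globus_libc_setenv() if the variable is not
 * already in the process environment); existing environment variables
 * take precedence over the file. The input buffer is modified in place. */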
static
int
globus_l_gsi_gssapi_parse_config(
    char *gsi_conf_data)
{
    char                               *p = gsi_conf_data;
    char                               *n = NULL; //newline
    char                               *c = NULL; //comment-start
    char                               *e = NULL; //equal
    int                                 rc = GLOBUS_SUCCESS;
    const char                          conf_key_prefix[] = "GLOBUS_GSSAPI_";
    const char                         *conf_keys[] = {
        "GLOBUS_GSSAPI_FORCE_TLS",
        "GLOBUS_GSSAPI_NAME_COMPATIBILITY",
        "GLOBUS_GSSAPI_CIPHERS",
        "GLOBUS_GSSAPI_SERVER_CIPHER_ORDER",
        NULL
    };

    while (p && *p)
    {
        n = strchr(p, '\n');
        if (n != NULL)
        {
            *n = '\0';
        }
        c = strchr(p, '#');
        if (c)
        {
            *c = '\0';
        }
        if (*p)
        {
            e = strchr(p, '=');
            if (e == NULL)
            {
                rc = GLOBUS_FAILURE;
                goto conf_parse_error;
            }
            *e = '\0';
            globus_l_gsi_trim_whitespace(p);
            if (strlen(e+1) > 0)
            {
                e++;
                globus_l_gsi_trim_whitespace(e);

                for (int i = 0; conf_keys[i] != NULL; i++)
                {
                    if (strcmp(p, conf_keys[i]+sizeof(conf_key_prefix)-1) == 0
                        && !getenv(conf_keys[i]))
                    {
                        char *newe = strdup(e);
                        if (!newe)
                        {
                            rc = GLOBUS_FAILURE;
                            goto conf_set_fail;
                        }
                        globus_module_setenv(conf_keys[i], newe);
                        if (getenv(conf_keys[i]) == NULL)
                        {
                            globus_libc_setenv(conf_keys[i], newe, 1);
                        }

                        break;
                    }
                }
            }
        }
        if (n == NULL)
        {
            p = NULL;
        }
        else
        {
            p = n+1;
        }
    }
conf_set_fail:
conf_parse_error:
    return rc;
}
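
For reference, a minimal driver sketch of the input format this parser accepts: KEY=VALUE lines, '#' comments, and key names with the GLOBUS_GSSAPI_ prefix stripped. The driver is hypothetical, and because the parser above is static it would have to live in the same translation unit.

#include <stdlib.h>
#include <string.h>

/* Hypothetical driver for the static parser above. The values below are
 * illustrative only; the key names come from the conf_keys table. */
static void
example_parse_gsi_conf(void)
{
    static const char example_conf[] =
        "# gsi.conf-style content (illustrative values)\n"
        "FORCE_TLS=true\n"
        "NAME_COMPATIBILITY=HYBRID   # exported as GLOBUS_GSSAPI_NAME_COMPATIBILITY\n";

    /* The parser writes '\0' into its input, so hand it a writable copy. */
    char *buf = strdup(example_conf);

    if (buf != NULL)
    {
        (void) globus_l_gsi_gssapi_parse_config(buf);
        free(buf);
    }
}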
Code example #3
File: main.c  Project: bbockelm/globus-toolkit
int
main(
    int                                 argc,
    char **                             argv)
{
    int                                 rc;
    globus_gram_job_manager_config_t    config;
    globus_gram_job_manager_t           manager;
    char *                              sleeptime_str;
    long                                sleeptime = 0;
    globus_bool_t                       debug_mode_service = GLOBUS_FALSE;
    globus_bool_t                       located_active_jm = GLOBUS_FALSE;
    int                                 http_body_fd = -1;
    int                                 context_fd = -1;
    gss_cred_id_t                       cred = GSS_C_NO_CREDENTIAL;
    OM_uint32                           major_status, minor_status;
    pid_t                               forked_starter = 0;
    globus_bool_t                       cgi_invoked = GLOBUS_FALSE;
    int                                 lock_tries_left = 10;

    if ((sleeptime_str = getenv("GLOBUS_JOB_MANAGER_SLEEP")))
    {
        sleeptime = atoi(sleeptime_str);
        sleep(sleeptime);
    }
    if (getenv("GATEWAY_INTERFACE"))
    {
        cgi_invoked = GLOBUS_TRUE;
    }
    /*
     * Stdin and stdout point at the socket to the client; make sure
     * there is no buffering. stderr may as well, depending on the
     * option in the grid-services file.
     */
    setbuf(stdout,NULL);
    /* Don't export these to the perl scripts */
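    /* (fcntl F_SETFD with a value of 1 sets FD_CLOEXEC, so these
     * descriptors are closed across exec() of the sub-processes.) */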
    fcntl(STDIN_FILENO, F_SETFD, (int) 1);
    fcntl(STDOUT_FILENO, F_SETFD, (int) 1);
    fcntl(STDERR_FILENO, F_SETFD, (int) 1);

    /*
     * At least have minimal POSIX path for job environment via extra
     * environment values
     */
    if(getenv("PATH") == NULL)
    {
        char * path;
        char default_path[] = "/usr/bin:/bin";
        size_t pathlen;

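        /* confstr(_CS_PATH, NULL, 0) returns the buffer size, including the
         * terminating NUL, needed for the system's default PATH value. */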
        pathlen = confstr(_CS_PATH, NULL, (size_t) 0);

        if (pathlen < sizeof(default_path))
        {
            pathlen = sizeof(default_path);
        }
        path = malloc(pathlen);
        if (path != NULL)
        {
            path[0] = 0;

            (void) confstr(_CS_PATH, path, pathlen);
            if (path[0] == 0)
            {
                strncpy(path, default_path, pathlen);
            }
            setenv("PATH", path, 1);
        }
    }

    /* Force non-threaded execution for now */
    globus_thread_set_model(GLOBUS_THREAD_MODEL_NONE);

    /* Activate the common module before parsing the command line so that
     * basic library facilities work. Note that we can't activate everything
     * yet because we might set GLOBUS_TCP_PORT_RANGE after parsing the
     * command-line args, and we need that set before activating XIO.
     */
    rc = globus_module_activate(GLOBUS_COMMON_MODULE);
    if (rc != GLOBUS_SUCCESS)
    {
        fprintf(stderr, "Error activating GLOBUS_COMMON_MODULE\n");
        exit(1);
    }

    /* Parse command line options to get jobmanager configuration */
    rc = globus_gram_job_manager_config_init(&config, argc, argv);
    if (rc != GLOBUS_SUCCESS)
    {
        reply_and_exit(NULL, rc, NULL);
    }

    globus_thread_key_create(
            &globus_i_gram_request_key,
            NULL);

    rc = globus_gram_job_manager_logging_init(&config);
    if (rc != GLOBUS_SUCCESS)
    {
        exit(1);
    }
    if (getenv("GRID_SECURITY_HTTP_BODY_FD") == NULL && !cgi_invoked)
    {
        debug_mode_service = GLOBUS_TRUE;
    }
    /* Set environment variables from configuration */
    if(config.globus_location != NULL)
    {
        globus_libc_setenv("GLOBUS_LOCATION",
                           config.globus_location,
                           GLOBUS_TRUE);
    }
    if(config.tcp_port_range != NULL)
    {
        globus_libc_setenv("GLOBUS_TCP_PORT_RANGE",
                           config.tcp_port_range,
                           GLOBUS_TRUE);
    }
    if(config.tcp_source_range != NULL)
    {
        globus_libc_setenv("GLOBUS_TCP_SOURCE_RANGE",
                           config.tcp_source_range,
                           GLOBUS_TRUE);
    }

    /* Activate all of the modules we will be using */
    rc = globus_l_gram_job_manager_activate();
    if(rc != GLOBUS_SUCCESS)
    {
        exit(1);
    }

    /*
     * Get the delegated credential (or the default credential if we are
     * run without a client). We don't care about errors in the latter case.
     */
    major_status = globus_gss_assist_acquire_cred(
            &minor_status,
            GSS_C_BOTH,
            &cred);
    if ((!debug_mode_service) && GSS_ERROR(major_status))
    {
        globus_gss_assist_display_status(
                stderr,
                "Error acquiring security credential\n",
                major_status,
                minor_status,
                0);
        exit(1);
    }

    if (cred != GSS_C_NO_CREDENTIAL)
    {
        unsigned long hash;
        char * newtag;

        rc = globus_gram_gsi_get_dn_hash(
                cred,
                &hash);
        if (rc == GLOBUS_SUCCESS)
        {
            newtag = globus_common_create_string("%s%s%lx",
                    strcmp(config.service_tag, "untagged") == 0
                            ? "" : config.service_tag,
                    strcmp(config.service_tag, "untagged") == 0
                            ? "" : ".",
                    hash);
            free(config.service_tag);
            config.service_tag = newtag;
        }
    }

    /*
     * Remove delegated proxy from disk.
     */
    if ((!debug_mode_service) && getenv("X509_USER_PROXY") != NULL)
    {
        remove(getenv("X509_USER_PROXY"));
        unsetenv("X509_USER_PROXY");
    }

    /* Set up LRM-specific state based on our configuration. This will create
     * the job contact listener, start the SEG if needed, and open the log
     * file if needed.
     */
    rc = globus_gram_job_manager_init(&manager, cred, &config);
    if(rc != GLOBUS_SUCCESS)
    {
        reply_and_exit(NULL, rc, manager.gt3_failure_message);
    }

    /*
     * Pull out file descriptor numbers for security context and job request
     * from the environment (set by the gatekeeper)
     */
    if (cgi_invoked)
    {
        http_body_fd = 0;
        context_fd = -1;
    }
    else if (!debug_mode_service)
    {
        char * fd_env = getenv("GRID_SECURITY_HTTP_BODY_FD");

        rc = sscanf(fd_env ? fd_env : "-1", "%d", &http_body_fd);
        if (rc != 1 || http_body_fd < 0)
        {
            fprintf(stderr, "Error locating http body fd\n");
            exit(1);
        }
        fcntl(http_body_fd, F_SETFD, 1);

        fd_env = getenv("GRID_SECURITY_CONTEXT_FD");
        rc = sscanf(fd_env ? fd_env : "-1", "%d", &context_fd);
        if (rc != 1 || context_fd < 0)
        {
            fprintf(stderr, "Error locating security context fd\n");
            exit(1);
        }
        fcntl(context_fd, F_SETFD, 1);
    }


    /* Redirect stdin from /dev/null; we'll handle stdout after the reply is
     * sent.
     */
    if (!cgi_invoked)
    {
        freopen("/dev/null", "r", stdin);
    }

    /* Here we'll either become the active job manager to process all
     * jobs for this user/host/lrm combination, or we'll hand off the
     * file descriptors containing the info to the active job manager
     */
    while (!located_active_jm)
    {
        /* We'll try to get the lock file associated with being the
         * active job manager here. If we get the OLD_JM_ALIVE error,
         * somebody else has it.
         */
        rc = globus_gram_job_manager_startup_lock(
                &manager,
                &manager.lock_fd);
        if (rc == GLOBUS_SUCCESS)
        {
            /* We've acquired the lock. We will fork a new process to act like
             * all other job managers which don't have the lock, and continue
             * on in this process managing jobs for this LRM.  Note that the
             * child process does not inherit the lock
             */
            if (!debug_mode_service)
            {
                int save_errno = 0;

                /* We've acquired the manager lock */
                forked_starter = fork();
                save_errno = errno;

                if (forked_starter < 0)
                {
                    if (sleeptime != 0)
                    {
                        sleep(sleeptime);
                    }

                    fprintf(stderr, "fork failed: %s", strerror(save_errno));
                    exit(1);
                }
                else if (forked_starter == 0)
                {
                    /* We are the child process. We'll close our reference to
                     * the lock and let the other process deal with jobs
                     */
                    close(manager.lock_fd);
                    manager.lock_fd = -1;
                }
                globus_logging_update_pid();
                if (sleeptime != 0)
                {
                    sleep(sleeptime);
                }

            }

            if (manager.lock_fd >= 0)
            {
                /* We hold the manager lock, so we'll store our credential and
                 * then try to accept socket connections. If the socket
                 * connections fail, we'll exit, and another process
                 * will be forked to handle them.
                 */
                rc = globus_gram_job_manager_gsi_write_credential(
                        NULL,
                        cred,
                        manager.cred_path);

                if (rc != GLOBUS_SUCCESS)
                {
                    fprintf(stderr, "write cred failed\n");
                    exit(1);
                }
                if (!debug_mode_service)
                {
                    close(http_body_fd);
                    http_body_fd = -1;
                }

                rc = globus_gram_job_manager_startup_socket_init(
                        &manager,
                        &manager.active_job_manager_handle,
                        &manager.socket_fd);
                if (rc != GLOBUS_SUCCESS)
                {
                    /* This releases our lock. Either the child process will
                     * attempt to acquire the lock again or some other job
                     * manager will acquire the lock.
                     */
                    exit(0);
                }
                assert(manager.socket_fd != -1);
            }
        }
        else if (rc != GLOBUS_GRAM_PROTOCOL_ERROR_OLD_JM_ALIVE)
        {
            /* Some system error. Try again */
            if (--lock_tries_left == 0)
            {
                reply_and_exit(NULL, rc, "Unable to create lock file");
            }
            sleep(1);
            continue;
        }

        /* If manager.socket_fd != -1 then we are the main job manager for this
         * LRM.
         * We will restart all existing jobs and then allow the startup
         * socket to accept new jobs from other job managers.
         */
        if (manager.socket_fd != -1)
        {
            /* Look up cputype/manufacturer if not known yet */
            globus_l_gram_cputype_and_manufacturer(manager.config);

            GlobusTimeAbstimeGetCurrent(manager.usagetracker->jm_start_time);            
            globus_i_gram_usage_stats_init(&manager);
            globus_i_gram_usage_start_session_stats(&manager);

            located_active_jm = GLOBUS_TRUE;

            /* Load existing jobs. The show must go on if this fails, unless it
             * fails with a misconfiguration error
             */
            rc = globus_gram_job_manager_request_load_all(
                    &manager);
            if (rc == GLOBUS_GRAM_PROTOCOL_ERROR_GATEKEEPER_MISCONFIGURED)
            {
                if (forked_starter > 0)
                {
                    kill(forked_starter, SIGTERM);
                    forked_starter = 0;
                }
                reply_and_exit(NULL, rc, manager.gt3_failure_message);
            }
            if (context_fd != -1)
            {
                close(context_fd);
                context_fd = -1;
            }
            freopen("/dev/null", "a", stdout);

            /* At this point, seg_last_timestamp is the earliest last timestamp 
             * for any pre-existing jobs. If that is 0, then we don't have any
             * existing jobs so we'll just ignore seg events prior to now.
             */
            if (manager.seg_last_timestamp == 0)
            {
                manager.seg_last_timestamp = time(NULL);
            }

            /* Start off the SEG if we need it.
             */
            if (config.seg_module != NULL || 
                strcmp(config.jobmanager_type, "fork") == 0 ||
                strcmp(config.jobmanager_type, "condor") == 0)
            {
                rc = globus_gram_job_manager_init_seg(&manager);

                /* TODO: If SEG load fails and load_all added some to the 
                 * job_id hash, they will need to be pushed into the state
                 * machine so that polling fallback can happen.
                 */
                if (rc != GLOBUS_SUCCESS)
                {
                    config.seg_module = NULL;
                }
            }
            /* GRAM-128:
             * Register a periodic event to process the GRAM jobs that were
             * reloaded from their job state files at job manager start time.
             * This will acquire and then release a reference to each job,
             * which, behind the scenes, will kick off the state machine
             * for that job if needed.
             */
            if (!globus_list_empty(manager.pending_restarts))
            {
                globus_reltime_t        restart_period;

                GlobusTimeReltimeSet(restart_period, 1, 0);

                rc = globus_callback_register_periodic(
                        &manager.pending_restart_handle,
                        NULL,
                        &restart_period,
                        globus_l_gram_process_pending_restarts,
                        &manager);
                        
            }

            {
                globus_reltime_t        expire_period;

                GlobusTimeReltimeSet(expire_period, 1, 0);

                rc = globus_callback_register_periodic(
                    &manager.expiration_handle,
                    NULL,
                    &expire_period,
                    globus_gram_job_manager_expire_old_jobs,
                    &manager);
            }

            {
                globus_reltime_t        lockcheck_period;

                GlobusTimeReltimeSet(lockcheck_period, 60, 0);

                rc = globus_callback_register_periodic(
                    &manager.lockcheck_handle,
                    NULL,
                    &lockcheck_period,
                    globus_l_gram_lockcheck,
                    &manager);
            }

            {
                globus_reltime_t        idlescript_period;

                GlobusTimeReltimeSet(idlescript_period, 60, 0);

                rc = globus_callback_register_periodic(
                    &manager.idle_script_handle,
                    NULL,
                    &idlescript_period,
                    globus_gram_script_close_idle,
                    &manager);
            }
        }
        else if (http_body_fd >= 0)
        {
            /* If manager.socket_fd == -1 then we are either the child from the
             * fork or another process started somehow (either command-line
             * invocation or via a job submit). If we have a client, then we'll
             * send our fds to the job manager with the lock and let it process
             * the job.
             *
             * If this succeeds, we set located_active_jm and leave the loop.
             * Otherwise, we try again.
             */
            if (context_fd >= 0)
            {
                rc = globus_gram_job_manager_starter_send(
                        &manager,
                        http_body_fd,
                        context_fd,
                        fileno(stdout),
                        cred);
            }
            else
            {
                rc = globus_gram_job_manager_starter_send_v2(
                        &manager,
                        cred);
            }
            if (rc == GLOBUS_SUCCESS)
            {
                located_active_jm = GLOBUS_TRUE;
                close(http_body_fd);
                if (context_fd >= 0)
                {
                    close(context_fd);
                }
                manager.done = GLOBUS_TRUE;
            }
            else
            {
                globus_libc_usleep(250000);
            }
        }
        else
        {
            /* We were started by hand, but another process is currently the
             * main job manager
             */
            unsigned long realpid = 0;
            FILE * pidin = fopen(manager.pid_path, "r");

            /* The pid file may be missing or unreadable; don't crash on it */
            if (pidin != NULL)
            {
                fscanf(pidin, "%lu", &realpid);
                fclose(pidin);
            }

            fprintf(stderr, "Other job manager process with pid %lu running and processing jobs\n",
                    realpid);

            exit(0);
        }
    }

    /* Ignore SIGCHLD and automatically reap child processes. Because of the
     * fork() above to delegate to another job manager process, and the use of
     * sub-processes to invoke the perl modules, we create some other
     * processes. We don't care too much how they exit, so we'll just make sure
     * we don't create zombies out of them.
     */
    {
        struct sigaction act;

        act.sa_handler = SIG_IGN;
        sigemptyset(&act.sa_mask);
        sigaddset(&act.sa_mask, SIGCHLD);
#ifdef SA_NOCLDWAIT
        act.sa_flags = SA_NOCLDWAIT;
#else
        /* This may leave zombies running on non-POSIX systems like Hurd */
        act.sa_flags = 0;
#endif
        sigaction(SIGCHLD, &act, NULL);
    }

    /* Enable log rotation via SIGUSR1 */
    {
        struct sigaction act;
        act.sa_handler = globus_i_job_manager_log_rotate;
        sigemptyset(&act.sa_mask);
        sigaddset(&act.sa_mask, SIGUSR1);
        act.sa_flags = 0;
        sigaction(SIGUSR1, &act, NULL);
    }
    
    GlobusGramJobManagerLock(&manager);
    if (manager.socket_fd != -1 &&
        globus_hashtable_empty(&manager.request_hash) &&
        manager.grace_period_timer == GLOBUS_NULL_HANDLE)
    {
        globus_gram_job_manager_set_grace_period_timer(&manager);
    }


    /* For the active job manager, this will block until all jobs have
     * terminated. For any other job manager, manager.done is set to
     * GLOBUS_TRUE and this falls right through.
     */
    while (! manager.done)
    {
        GlobusGramJobManagerWait(&manager);
    }
    if (manager.expiration_handle != GLOBUS_NULL_HANDLE)
    {
        globus_callback_unregister(manager.expiration_handle, NULL, NULL, NULL);
    }
    if (manager.lockcheck_handle != GLOBUS_NULL_HANDLE)
    {
        globus_callback_unregister(manager.lockcheck_handle, NULL, NULL, NULL);
    }
    if (manager.idle_script_handle != GLOBUS_NULL_HANDLE)
    {
        globus_callback_unregister(manager.idle_script_handle, NULL, NULL, NULL);
    }
    GlobusGramJobManagerUnlock(&manager);

    globus_gram_job_manager_log(
            &manager,
            GLOBUS_GRAM_JOB_MANAGER_LOG_DEBUG,
            "event=gram.end "
            "level=DEBUG "
            "\n");

    /* Clean-up to do if we are the active job manager only */
    if (manager.socket_fd != -1)
    {
        globus_gram_job_manager_script_close_all(&manager);
        globus_i_gram_usage_end_session_stats(&manager);
        globus_i_gram_usage_stats_destroy(&manager);
        remove(manager.pid_path);
        remove(manager.cred_path);
        remove(manager.socket_path);
        remove(manager.lock_path);
    }
    globus_gram_job_manager_logging_destroy();
    globus_gram_job_manager_destroy(&manager);
    globus_gram_job_manager_config_destroy(&config);

    rc = globus_l_gram_deactivate();
    if (rc != GLOBUS_SUCCESS)
    {
        fprintf(stderr, "deactivation failed with rc=%d\n",
                rc);
        exit(1);
    }

/*
    {
        const char * gk_jm_id_var = "GATEKEEPER_JM_ID";
        const char * gk_jm_id = globus_libc_getenv(gk_jm_id_var);

        globus_gram_job_manager_request_acct(
                request,
                "%s %s JM exiting\n",
                gk_jm_id_var, gk_jm_id ? gk_jm_id : "none");
    }
*/


    return(0);
}
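
The SIGCHLD setup near the end of main() above (SIG_IGN plus SA_NOCLDWAIT) is a standard POSIX idiom for auto-reaping child processes. A self-contained sketch of just that idiom, independent of GRAM (all names below are made up for illustration):

#include <signal.h>
#include <unistd.h>

/* Auto-reap children so they never become zombies: ignore SIGCHLD and,
 * where available, set SA_NOCLDWAIT so the kernel discards each child's
 * exit status instead of holding it for wait(). */
static void
auto_reap_children(void)
{
    struct sigaction act;

    act.sa_handler = SIG_IGN;
    sigemptyset(&act.sa_mask);
#ifdef SA_NOCLDWAIT
    act.sa_flags = SA_NOCLDWAIT;
#else
    act.sa_flags = 0;
#endif
    sigaction(SIGCHLD, &act, NULL);
}

int
main(void)
{
    auto_reap_children();

    if (fork() == 0)
    {
        _exit(0);   /* child exits immediately; no zombie is left behind */
    }
    sleep(1);       /* parent never calls wait(); the child is reaped anyway */
    return 0;
}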