// move to a new position
Position &
vast_dc::setpos (Position &pt)
{
    // do not move if we haven't joined (no neighbors are known unless I'm gateway)
    if (is_joined () == true)
    {
        // do necessary adjustments
        adjust_aoi ();
        remove_nonoverlapped ();

        // check for position overlap with neighbors and make correction
        map<VAST::id_t, Node>::iterator it;
        for (it = _id2node.begin(); it != _id2node.end(); ++it)
        {
            if (it->first == _self.id)
                continue;
            if (it->second.pos == pt)
            {
                pt.x++;
                it = _id2node.begin();
            }
        }

        // update location information
        //double dis = _self.pos.dist (pt);
        _self.pos  = pt;
        _self.time = _net->get_curr_timestamp ();   //_time;

        update_node (_self);

#ifdef DEBUG_DETAIL
        printf ("[%lu] setpos (%d, %d)\n", _self.id, (int)_self.pos.x, (int)_self.pos.y);
#endif

        // notify all connected neighbors
        msgtype_t  msgtype;
        VAST::id_t target_id;

        // go over each neighbor and do a boundary neighbor check
        int n = 0;
        for (it = _id2node.begin(); it != _id2node.end(); ++it)
        {
            if ((target_id = it->first) == _self.id)
                continue;

            if (_voronoi->is_boundary (target_id, _self.pos, _self.aoi) == true)
                msgtype = DC_MOVE_B;
            else
                msgtype = DC_MOVE;

            _net->sendmsg (target_id, (msgtype_t)msgtype, (char *)&_self, sizeof (Node), false);
            n++;
        }
    }

    return _self.pos;
}
END_TEST

START_TEST(is_joined_test)
  {
  job pjob;

  memset(&pjob, 0, sizeof(pjob));

  pjob.ji_wattr[JOB_ATR_join].at_flags |= ATR_VFLAG_SET;
  pjob.ji_wattr[JOB_ATR_join].at_val.at_str = strdup("oe");
  fail_unless(is_joined(&pjob, JOB_ATR_exec_host) == 0);
  fail_unless(is_joined(&pjob, JOB_ATR_outpath) == 0);
  fail_unless(is_joined(&pjob, JOB_ATR_errpath) == 1);

  pjob.ji_wattr[JOB_ATR_join].at_val.at_str = strdup("e");
  fail_unless(is_joined(&pjob, JOB_ATR_outpath) == 0);

  pjob.ji_wattr[JOB_ATR_join].at_val.at_str = strdup("o");
  fail_unless(is_joined(&pjob, JOB_ATR_errpath) == 0);
  }
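/*
 * A minimal sketch (not the shipped TORQUE implementation) of a two-argument
 * is_joined() that satisfies the expectations in is_joined_test above: return 1
 * only when the stream named by the attribute is folded into the stream listed
 * first in the join spec (e.g. errpath with join "oe").  It assumes the
 * job/attribute definitions from pbs_job.h; the helper join_spec_covers() and
 * the name is_joined_sketch() are hypothetical.
 */
#include <string.h>

static int join_spec_covers(const char *spec, char stream)
  {
  const char *p;

  if ((spec == NULL) || (*spec == '\0'))
    return(0);

  /* joined only if the stream appears somewhere after the first (primary)
   * letter of the spec, e.g. the 'e' in "oe" */
  p = strchr(spec, stream);

  return((p != NULL) && (p != spec));
  }

int is_joined_sketch(

  job *pjob, /* I */
  int  ati)  /* I - e.g. JOB_ATR_outpath or JOB_ATR_errpath */

  {
  if (!(pjob->ji_wattr[JOB_ATR_join].at_flags & ATR_VFLAG_SET))
    return(0);

  if (ati == JOB_ATR_outpath)
    return(join_spec_covers(pjob->ji_wattr[JOB_ATR_join].at_val.at_str, 'o'));

  if (ati == JOB_ATR_errpath)
    return(join_spec_covers(pjob->ji_wattr[JOB_ATR_join].at_val.at_str, 'e'));

  return(0);
  }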
int run_pelog( int which, /* I (one of PE_*) */ char *specpelog, /* I - script path */ job *pjob, /* I - associated job */ int pe_io_type, /* I - io type */ int deletejob) /* I - called before a job being deleted (purge -p) */ { struct sigaction act; struct sigaction oldact; char *arg[12]; int fds1 = 0; int fds2 = 0; int fd_input; char resc_list[2048]; char resc_used[2048]; struct stat sbuf; char sid[20]; char exit_stat[11]; int waitst; int isjoined; /* boolean */ char buf[MAXPATHLEN + 1024]; char pelog[MAXPATHLEN + 1024]; uid_t real_uid; gid_t *real_gids = NULL; gid_t real_gid; int num_gids; int jobtypespecified = 0; resource *r; char *EmptyString = (char *)""; int LastArg; int aindex; int rc; char *ptr; int moabenvcnt = 14; /* # of entries in moabenvs */ static char *moabenvs[] = { (char *)"MOAB_NODELIST", (char *)"MOAB_JOBID", (char *)"MOAB_JOBNAME", (char *)"MOAB_USER", (char *)"MOAB_GROUP", (char *)"MOAB_CLASS", (char *)"MOAB_TASKMAP", (char *)"MOAB_QOS", (char *)"MOAB_PARTITION", (char *)"MOAB_PROCCOUNT", (char *)"MOAB_NODECOUNT", (char *)"MOAB_MACHINE", (char *)"MOAB_JOBARRAYINDEX", (char *)"MOAB_JOBARRAYRANGE" }; if ((pjob == NULL) || (specpelog == NULL) || (specpelog[0] == '\0')) { return(0); } ptr = pjob->ji_wattr[JOB_ATR_jobtype].at_val.at_str; if (ptr != NULL) { jobtypespecified = 1; snprintf(pelog,sizeof(pelog),"%s.%s", specpelog, ptr); } else { snprintf(pelog, sizeof(pelog), "%s", specpelog); } real_uid = getuid(); real_gid = getgid(); if ((num_gids = getgroups(0, real_gids)) < 0) { log_err(errno, __func__, (char *)"getgroups failed\n"); return(-1); } /* to support root squashing, become the user before performing file checks */ if ((which == PE_PROLOGUSER) || (which == PE_EPILOGUSER) || (which == PE_PROLOGUSERJOB) || (which == PE_EPILOGUSERJOB)) { real_gids = (gid_t *)calloc(num_gids, sizeof(gid_t)); if (real_gids == NULL) { log_err(ENOMEM, __func__, (char *)"Cannot allocate memory! FAILURE\n"); return(-1); } if (getgroups(num_gids,real_gids) < 0) { log_err(errno, __func__, (char *)"getgroups failed\n"); free(real_gids); return(-1); } /* pjob->ji_grpcache will not be set if using LDAP and LDAP not set */ /* It is possible that ji_grpcache failed to allocate as well. Make sure ji_grpcache is not NULL */ if (pjob->ji_grpcache != NULL) { if (setgroups( pjob->ji_grpcache->gc_ngroup, (gid_t *)pjob->ji_grpcache->gc_groups) != 0) { snprintf(log_buffer,sizeof(log_buffer), "setgroups() for UID = %lu failed: %s\n", (unsigned long)pjob->ji_qs.ji_un.ji_momt.ji_exuid, strerror(errno)); log_err(errno, __func__, log_buffer); undo_set_euid_egid(which,real_uid,real_gid,num_gids,real_gids,__func__); free(real_gids); return(-1); } } else { sprintf(log_buffer, "pjob->ji_grpcache is null. 
check_pwd likely failed."); log_err(-1, __func__, log_buffer); undo_set_euid_egid(which,real_uid,real_gid,num_gids,real_gids,__func__); free(real_gids); return(-1); } if (setegid(pjob->ji_qs.ji_un.ji_momt.ji_exgid) != 0) { snprintf(log_buffer,sizeof(log_buffer), "setegid(%lu) for UID = %lu failed: %s\n", (unsigned long)pjob->ji_qs.ji_un.ji_momt.ji_exgid, (unsigned long)pjob->ji_qs.ji_un.ji_momt.ji_exuid, strerror(errno)); log_err(errno, __func__, log_buffer); undo_set_euid_egid(which,real_uid,real_gid,num_gids,real_gids,__func__); free(real_gids); return(-1); } if (setuid_ext(pjob->ji_qs.ji_un.ji_momt.ji_exuid, TRUE) != 0) { snprintf(log_buffer,sizeof(log_buffer), "seteuid(%lu) failed: %s\n", (unsigned long)pjob->ji_qs.ji_un.ji_momt.ji_exuid, strerror(errno)); log_err(errno, __func__, log_buffer); undo_set_euid_egid(which,real_uid,real_gid,num_gids,real_gids,__func__); free(real_gids); return(-1); } } rc = stat(pelog,&sbuf); if ((rc == -1) && (jobtypespecified == 1)) { snprintf(pelog, sizeof(pelog), "%s", specpelog); rc = stat(pelog,&sbuf); } if (rc == -1) { if (errno == ENOENT || errno == EBADF) { /* epilog/prolog script does not exist */ if (LOGLEVEL >= 5) { static char tmpBuf[1024]; sprintf(log_buffer, "%s script '%s' for job %s does not exist (cwd: %s,pid: %d)", PPEType[which], (pelog[0] != '\0') ? pelog : "NULL", pjob->ji_qs.ji_jobid, getcwd(tmpBuf, sizeof(tmpBuf)), getpid()); log_record(PBSEVENT_SYSTEM, 0, __func__, log_buffer); } #ifdef ENABLE_CSA if ((which == PE_EPILOGUSER) && (!strcmp(pelog, path_epiloguser))) { /* * Add a workload management end record */ if (LOGLEVEL >= 8) { sprintf(log_buffer, "%s calling add_wkm_end from run_pelog() - no user epilog", pjob->ji_qs.ji_jobid); log_err(-1, __func__, log_buffer); } add_wkm_end(pjob->ji_wattr[JOB_ATR_pagg_id].at_val.at_ll, pjob->ji_qs.ji_un.ji_momt.ji_exitstat, pjob->ji_qs.ji_jobid); } #endif /* ENABLE_CSA */ undo_set_euid_egid(which,real_uid,real_gid,num_gids,real_gids,__func__); free(real_gids); return(0); } undo_set_euid_egid(which,real_uid,real_gid,num_gids,real_gids,__func__); free(real_gids); return(pelog_err(pjob,pelog,errno,(char *)"cannot stat")); } if (LOGLEVEL >= 5) { sprintf(log_buffer,"running %s script '%s' for job %s", PPEType[which], (pelog[0] != '\0') ? 
pelog : "NULL", pjob->ji_qs.ji_jobid); log_ext(-1, __func__, log_buffer, LOG_DEBUG); /* not actually an error--but informational */ } /* script must be owned by root, be regular file, read and execute by user * * and not writeable by group or other */ if (reduceprologchecks == TRUE) { if ((!S_ISREG(sbuf.st_mode)) || (!(sbuf.st_mode & (S_IXUSR | S_IXGRP | S_IXOTH)))) { undo_set_euid_egid(which,real_uid,real_gid,num_gids,real_gids,__func__); free(real_gids); return(pelog_err(pjob,pelog,-1, (char *)"permission Error")); } } else { if (which == PE_PROLOGUSERJOB || which == PE_EPILOGUSERJOB) { if ((sbuf.st_uid != pjob->ji_qs.ji_un.ji_momt.ji_exuid) || (!S_ISREG(sbuf.st_mode)) || ((sbuf.st_mode & (S_IRUSR | S_IXUSR)) != (S_IRUSR | S_IXUSR)) || (sbuf.st_mode & (S_IWGRP | S_IWOTH))) { undo_set_euid_egid(which,real_uid,real_gid,num_gids,real_gids,__func__); free(real_gids); return(pelog_err(pjob,pelog,-1, (char *)"permission Error")); } } else if ((sbuf.st_uid != 0) || (!S_ISREG(sbuf.st_mode)) || ((sbuf.st_mode & (S_IRUSR | S_IXUSR)) != (S_IRUSR | S_IXUSR)) ||\ (sbuf.st_mode & (S_IWGRP | S_IWOTH))) { undo_set_euid_egid(which,real_uid,real_gid,num_gids,real_gids,__func__); free(real_gids); return(pelog_err(pjob,pelog,-1, (char *)"permission Error")); } if ((which == PE_PROLOGUSER) || (which == PE_EPILOGUSER)) { /* script must also be read and execute by other */ if ((sbuf.st_mode & (S_IROTH | S_IXOTH)) != (S_IROTH | S_IXOTH)) { undo_set_euid_egid(which,real_uid,real_gid,num_gids,real_gids,__func__); free(real_gids); return(pelog_err(pjob, pelog, -1, (char *)"permission Error")); } } } /* END !reduceprologchecks */ fd_input = pe_input(pjob->ji_qs.ji_jobid); if (fd_input < 0) { undo_set_euid_egid(which,real_uid,real_gid,num_gids,real_gids,__func__); free(real_gids); return(pelog_err(pjob, pelog, -2, (char *)"no pro/epilogue input file")); } run_exit = 0; child = fork(); if (child > 0) { int KillSent = FALSE; /* parent - watch for prolog/epilog to complete */ close(fd_input); /* switch back to root if necessary */ undo_set_euid_egid(which,real_uid,real_gid,num_gids,real_gids,__func__); free(real_gids); act.sa_handler = pelogalm; sigemptyset(&act.sa_mask); act.sa_flags = 0; sigaction(SIGALRM, &act, &oldact); /* it would be nice if the harvest routine could block for 5 seconds, and if the prolog is not complete in that time, mark job as prolog pending, append prolog child, and continue */ /* main loop should attempt to harvest prolog in non-blocking mode. If unsuccessful after timeout, job should be terminated, and failure reported. If successful, mom should unset prolog pending, and continue with job start sequence. Mom should report job as running while prologpending flag is set. (NOTE: must track per job prolog start time) */ alarm(pe_alarm_time); while (waitpid(child, &waitst, 0) < 0) { if (errno != EINTR) { /* exit loop. 
non-alarm based failure occurred */ run_exit = -3; MOMPrologFailureCount++; break; } if (run_exit == -4) { if (KillSent == FALSE) { MOMPrologTimeoutCount++; /* timeout occurred */ KillSent = TRUE; /* NOTE: prolog/epilog may be locked in KERNEL space and unkillable */ alarm(5); } else { /* cannot kill prolog/epilog, give up */ run_exit = -5; break; } } } /* END while (wait(&waitst) < 0) */ /* epilog/prolog child completed */ #ifdef ENABLE_CSA if ((which == PE_EPILOGUSER) && (!strcmp(pelog, path_epiloguser))) { /* * Add a workload management end record */ if (LOGLEVEL >= 8) { sprintf(log_buffer, "%s calling add_wkm_end from run_pelog() - after user epilog", pjob->ji_qs.ji_jobid); log_err(-1, __func__, log_buffer); } add_wkm_end(pjob->ji_wattr[JOB_ATR_pagg_id].at_val.at_ll, pjob->ji_qs.ji_un.ji_momt.ji_exitstat, pjob->ji_qs.ji_jobid); } #endif /* ENABLE_CSA */ alarm(0); /* restore the previous handler */ sigaction(SIGALRM, &oldact, 0); if (run_exit == 0) { if (WIFEXITED(waitst)) { run_exit = WEXITSTATUS(waitst); } } } else { /* child - run script */ log_close(0); if (lockfds >= 0) { close(lockfds); lockfds = -1; } net_close(-1); if (fd_input != 0) { close(0); if (dup(fd_input) == -1) {} close(fd_input); } if (pe_io_type == PE_IO_TYPE_NULL) { /* no output, force to /dev/null */ fds1 = open("/dev/null", O_WRONLY, 0600); fds2 = open("/dev/null", O_WRONLY, 0600); } else if (pe_io_type == PE_IO_TYPE_STD) { /* open job standard out/error */ /* * We need to know if files are joined or not. * If they are then open the correct file and duplicate it to the other */ isjoined = is_joined(pjob); switch (isjoined) { case -1: fds2 = open_std_file(pjob, StdErr, O_WRONLY | O_APPEND, pjob->ji_qs.ji_un.ji_momt.ji_exgid); fds1 = (fds2 < 0)?-1:dup(fds2); break; case 1: fds1 = open_std_file(pjob, StdOut, O_WRONLY | O_APPEND, pjob->ji_qs.ji_un.ji_momt.ji_exgid); fds2 = (fds1 < 0)?-1:dup(fds1); break; default: fds1 = open_std_file(pjob, StdOut, O_WRONLY | O_APPEND, pjob->ji_qs.ji_un.ji_momt.ji_exgid); fds2 = open_std_file(pjob, StdErr, O_WRONLY | O_APPEND, pjob->ji_qs.ji_un.ji_momt.ji_exgid); break; } } if (!deletejob) if ((fds1 < 0) || (fds2 < 0)) { if (fds1 >= 0) close(fds1); if (fds2 >= 0) close(fds2); exit(-1); } if (pe_io_type != PE_IO_TYPE_ASIS) { /* If PE_IO_TYPE_ASIS, leave as is, already open to job */ if (fds1 != 1) { close(1); if (dup(fds1) >= 0) { close(fds1); } } if (fds2 != 2) { close(2); if (dup(fds2) >= 0) { close(fds2); } } } if ((which == PE_PROLOGUSER) || (which == PE_EPILOGUSER) || (which == PE_PROLOGUSERJOB) || (which == PE_EPILOGUSERJOB)) { if (chdir(pjob->ji_grpcache->gc_homedir) != 0) { /* warn only, no failure */ sprintf(log_buffer, "PBS: chdir to %s failed: %s (running user %s in current directory)", pjob->ji_grpcache->gc_homedir, strerror(errno), which == PE_PROLOGUSER ? 
"prologue" : "epilogue"); if (write_ac_socket(2, log_buffer, strlen(log_buffer)) == -1) {} fsync(2); } } /* for both prolog and epilog */ if (DEBUGMODE == 1) { fprintf(stderr, "PELOGINFO: script:'%s' jobid:'%s' euser:'******' egroup:'%s' jobname:'%s' SSID:'%ld' RESC:'%s'\n", pelog, pjob->ji_qs.ji_jobid, pjob->ji_wattr[JOB_ATR_euser].at_val.at_str, pjob->ji_wattr[JOB_ATR_egroup].at_val.at_str, pjob->ji_wattr[JOB_ATR_jobname].at_val.at_str, pjob->ji_wattr[JOB_ATR_session_id].at_val.at_long, resc_to_string(pjob, JOB_ATR_resource, resc_list, sizeof(resc_list))); } arg[0] = pelog; arg[1] = pjob->ji_qs.ji_jobid; arg[2] = pjob->ji_wattr[JOB_ATR_euser].at_val.at_str; arg[3] = pjob->ji_wattr[JOB_ATR_egroup].at_val.at_str; arg[4] = pjob->ji_wattr[JOB_ATR_jobname].at_val.at_str; /* NOTE: inside child */ if ((which == PE_EPILOG) || (which == PE_EPILOGUSER) || (which == PE_EPILOGUSERJOB)) { /* for epilog only */ sprintf(sid, "%ld", pjob->ji_wattr[JOB_ATR_session_id].at_val.at_long); sprintf(exit_stat,"%d", pjob->ji_qs.ji_un.ji_momt.ji_exitstat); arg[5] = sid; arg[6] = resc_to_string(pjob, JOB_ATR_resource, resc_list, sizeof(resc_list)); arg[7] = resc_to_string(pjob, JOB_ATR_resc_used, resc_used, sizeof(resc_used)); arg[8] = pjob->ji_wattr[JOB_ATR_in_queue].at_val.at_str; arg[9] = pjob->ji_wattr[JOB_ATR_account].at_val.at_str; arg[10] = exit_stat; arg[11] = NULL; LastArg = 11; } else { /* prolog */ arg[5] = resc_to_string(pjob, JOB_ATR_resource, resc_list, sizeof(resc_list)); arg[6] = pjob->ji_wattr[JOB_ATR_in_queue].at_val.at_str; arg[7] = pjob->ji_wattr[JOB_ATR_account].at_val.at_str; arg[8] = NULL; LastArg = 8; } for (aindex = 0;aindex < LastArg;aindex++) { if (arg[aindex] == NULL) arg[aindex] = EmptyString; } /* END for (aindex) */ /* * Pass Resource_List.nodes request in environment * to allow pro/epi-logue setup/teardown of system * settings. --pw, 2 Jan 02 * Fixed to use putenv for sysV compatibility. 
* --troy, 11 jun 03 * */ r = find_resc_entry( &pjob->ji_wattr[JOB_ATR_resource], find_resc_def(svr_resc_def, (char *)"nodes", svr_resc_size)); if (r != NULL) { /* setenv("PBS_RESOURCE_NODES",r->rs_value.at_val.at_str,1); */ const char *ppn_str = "ppn="; int num_nodes = 1; int num_ppn = 1; /* PBS_RESOURCE_NODES */ put_env_var("PBS_RESOURCE_NODES", r->rs_value.at_val.at_str); /* PBS_NUM_NODES */ num_nodes = strtol(r->rs_value.at_val.at_str, NULL, 10); /* * InitUserEnv() also calculates num_nodes and num_ppn the same way */ if (num_nodes != 0) { char *tmp; char *other_reqs; /* get the ppn */ if ((tmp = strstr(r->rs_value.at_val.at_str,ppn_str)) != NULL) { tmp += strlen(ppn_str); num_ppn = strtol(tmp, NULL, 10); } other_reqs = r->rs_value.at_val.at_str; while ((other_reqs = strchr(other_reqs, '+')) != NULL) { other_reqs += 1; num_nodes += strtol(other_reqs, &other_reqs, 10); } } sprintf(buf, "%d", num_nodes); put_env_var("PBS_NUM_NODES", buf); /* PBS_NUM_PPN */ sprintf(buf, "%d", num_ppn); put_env_var("PBS_NUM_PPN", buf); /* PBS_NP */ sprintf(buf, "%d", pjob->ji_numvnod); put_env_var("PBS_NP", buf); } /* END if (r != NULL) */ r = find_resc_entry( &pjob->ji_wattr[JOB_ATR_resource], find_resc_def(svr_resc_def, (char *)"gres", svr_resc_size)); if (r != NULL) { /* setenv("PBS_RESOURCE_NODES",r->rs_value.at_val.at_str,1); */ put_env_var("PBS_RESOURCE_GRES", r->rs_value.at_val.at_str); } if (TTmpDirName(pjob, buf, sizeof(buf))) { put_env_var("TMPDIR", buf); } /* Set PBS_SCHED_HINT */ { char *envname = (char *)"PBS_SCHED_HINT"; char *envval; if ((envval = get_job_envvar(pjob, envname)) != NULL) { put_env_var("PBS_SCHED_HINT", envval); } } /* Set PBS_NODENUM */ sprintf(buf, "%d", pjob->ji_nodeid); put_env_var("PBS_NODENUM", buf); /* Set PBS_MSHOST */ put_env_var("PBS_MSHOST", pjob->ji_vnods[0].vn_host->hn_host); /* Set PBS_NODEFILE */ if (pjob->ji_flags & MOM_HAS_NODEFILE) { sprintf(buf, "%s/%s", path_aux, pjob->ji_qs.ji_jobid); put_env_var("PBS_NODEFILE", buf); } /* Set PBS_O_WORKDIR */ { char *workdir_val; workdir_val = get_job_envvar(pjob,"PBS_O_WORKDIR"); if (workdir_val != NULL) { put_env_var("PBS_O_WORKDIR", workdir_val); } } /* SET BEOWULF_JOB_MAP */ { struct array_strings *vstrs; int VarIsSet = 0; int j; vstrs = pjob->ji_wattr[JOB_ATR_variables].at_val.at_arst; for (j = 0;j < vstrs->as_usedptr;++j) { if (!strncmp( vstrs->as_string[j], "BEOWULF_JOB_MAP=", strlen("BEOWULF_JOB_MAP="))) { VarIsSet = 1; break; } } if (VarIsSet == 1) { char *val = strchr(vstrs->as_string[j], '='); if (val != NULL) put_env_var("BEOWULF_JOB_MAP", val+1); } } /* Set some Moab env variables if they exist */ if ((which == PE_PROLOG) || (which == PE_EPILOG)) { char *tmp_val; for (aindex=0;aindex<moabenvcnt;aindex++) { tmp_val = get_job_envvar(pjob,moabenvs[aindex]); if (tmp_val != NULL) { put_env_var(moabenvs[aindex], tmp_val); } } } /* * if we want to run as user then we need to reset real user permissions * since it seems that some OSs use real not effective user id when execv'ing */ if ((which == PE_PROLOGUSER) || (which == PE_EPILOGUSER) || (which == PE_PROLOGUSERJOB) || (which == PE_EPILOGUSERJOB)) { setuid_ext(pbsuser, TRUE); setegid(pbsgroup); if (setgid(pjob->ji_qs.ji_un.ji_momt.ji_exgid) != 0) { snprintf(log_buffer,sizeof(log_buffer), "setgid(%lu) for UID = %lu failed: %s\n", (unsigned long)pjob->ji_qs.ji_un.ji_momt.ji_exgid, (unsigned long)pjob->ji_qs.ji_un.ji_momt.ji_exuid, strerror(errno)); log_err(errno, __func__, log_buffer); exit(-1); } if (setuid_ext(pjob->ji_qs.ji_un.ji_momt.ji_exuid, FALSE) != 0) { 
snprintf(log_buffer,sizeof(log_buffer), "setuid(%lu) failed: %s\n", (unsigned long)pjob->ji_qs.ji_un.ji_momt.ji_exuid, strerror(errno)); log_err(errno, __func__, log_buffer); exit(-1); } } execv(pelog,arg); sprintf(log_buffer,"execv of %s failed: %s\n", pelog, strerror(errno)); if (write_ac_socket(2, log_buffer, strlen(log_buffer)) == -1) { /* cannot write message to stderr */ /* NO-OP */ } fsync(2); exit(255); } /* END else () */ switch (run_exit) { case 0: /* SUCCESS */ /* NO-OP */ break; case - 3: pelog_err(pjob, pelog, run_exit, (char *)"child wait interrupted"); break; case - 4: pelog_err(pjob, pelog, run_exit, (char *)"prolog/epilog timeout occurred, child cleaned up"); break; case - 5: pelog_err(pjob, pelog, run_exit, (char *) "prolog/epilog timeout occurred, cannot kill child"); break; default: pelog_err(pjob, pelog, run_exit, (char *)"nonzero p/e exit status"); break; } /* END switch (run_exit) */ return(run_exit); } /* END run_pelog() */
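/*
 * Hedged usage sketch for the five-argument run_pelog() above, not code from
 * the TORQUE tree.  PE_PROLOG, PE_PROLOGUSER, PE_IO_TYPE_STD and FALSE appear
 * in the function itself; the globals path_prolog and path_prologuser and the
 * wrapper name run_job_prologues() are assumptions for illustration only.
 */
int run_job_prologues(job *pjob)
  {
  int rc;

  /* system prologue first; deletejob == FALSE because the job is starting */
  rc = run_pelog(PE_PROLOG, path_prolog, pjob, PE_IO_TYPE_STD, FALSE);

  if (rc != 0)
    return(rc);

  /* then the user prologue, which runs under the job owner's credentials */
  return(run_pelog(PE_PROLOGUSER, path_prologuser, pjob, PE_IO_TYPE_STD, FALSE));
  }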
int run_pelog( int which, /* I (one of PE_*) */ char *specpelog, /* I - script path */ job *pjob, /* I - associated job */ int pe_io_type) /* I */ { char *id = "run_pelog"; struct sigaction act, oldact; char *arg[12]; int fds1 = 0; int fds2 = 0; int fd_input; char resc_list[2048]; char resc_used[2048]; struct stat sbuf; char sid[20]; char exit_stat[11]; int waitst; int isjoined; /* boolean */ char buf[MAXPATHLEN + 1024]; char pelog[MAXPATHLEN + 1024]; int jobtypespecified = 0; resource *r; char *EmptyString = ""; int LastArg; int aindex; int rc; char *ptr; if ((pjob == NULL) || (specpelog == NULL) || (specpelog[0] == '\0')) { return(0); } ptr = pjob->ji_wattr[(int)JOB_ATR_jobtype].at_val.at_str; if (ptr != NULL) { jobtypespecified = 1; snprintf(pelog,sizeof(pelog),"%s.%s", specpelog, ptr); } else { strncpy(pelog,specpelog,sizeof(pelog)); } rc = stat(pelog,&sbuf); if ((rc == -1) && (jobtypespecified == 1)) { strncpy(pelog,specpelog,sizeof(pelog)); rc = stat(pelog,&sbuf); } if (rc == -1) { if (errno == ENOENT || errno == EBADF) { /* epilog/prolog script does not exist */ if (LOGLEVEL >= 5) { static char tmpBuf[1024]; sprintf(log_buffer, "%s script '%s' for job %s does not exist (cwd: %s,pid: %d)", PPEType[which], (pelog != NULL) ? pelog : "NULL", (pjob != NULL) ? pjob->ji_qs.ji_jobid : "NULL", getcwd(tmpBuf, sizeof(tmpBuf)), (int)getpid()); log_record(PBSEVENT_SYSTEM, 0, id, log_buffer); } #ifdef ENABLE_CSA if ((which == PE_EPILOGUSER) && (!strcmp(pelog, path_epiloguser))) { /* * Add a workload management end record */ if (LOGLEVEL >= 8) { sprintf(log_buffer, "%s calling add_wkm_end from run_pelog() - no user epilog", pjob->ji_qs.ji_jobid); log_err(-1, id, log_buffer); } add_wkm_end(pjob->ji_wattr[(int)JOB_ATR_pagg_id].at_val.at_ll, pjob->ji_qs.ji_un.ji_momt.ji_exitstat, pjob->ji_qs.ji_jobid); } #endif /* ENABLE_CSA */ return(0); } return(pelog_err(pjob,pelog,errno,"cannot stat")); } if (LOGLEVEL >= 5) { sprintf(log_buffer,"running %s script '%s' for job %s", PPEType[which], (pelog != NULL) ? 
pelog : "NULL", pjob->ji_qs.ji_jobid); log_ext(-1,id,log_buffer,LOG_DEBUG); /* not actually an error--but informational */ } /* script must be owned by root, be regular file, read and execute by user * * and not writeable by group or other */ if(which == PE_PROLOGUSERJOB || which == PE_EPILOGUSERJOB) { if ((sbuf.st_uid != pjob->ji_qs.ji_un.ji_momt.ji_exuid) || (!S_ISREG(sbuf.st_mode)) || ((sbuf.st_mode & (S_IRUSR | S_IXUSR)) != (S_IRUSR | S_IXUSR)) || (sbuf.st_mode & (S_IWGRP | S_IWOTH))) { return(pelog_err(pjob,pelog,-1,"permission Error")); } } else if ((sbuf.st_uid != 0) || (!S_ISREG(sbuf.st_mode)) || ((sbuf.st_mode & (S_IRUSR | S_IXUSR)) != (S_IRUSR | S_IXUSR)) || (sbuf.st_mode & (S_IWGRP | S_IWOTH))) { return(pelog_err(pjob,pelog,-1,"permission Error")); } if ((which == PE_PROLOGUSER) || (which == PE_EPILOGUSER)) { /* script must also be read and execute by other */ if ((sbuf.st_mode & (S_IROTH | S_IXOTH)) != (S_IROTH | S_IXOTH)) { return(pelog_err(pjob, pelog, -1, "permission Error")); } } fd_input = pe_input(pjob->ji_qs.ji_jobid); if (fd_input < 0) { return(pelog_err(pjob, pelog, -2, "no pro/epilogue input file")); } run_exit = 0; child = fork(); if (child > 0) { int KillSent = FALSE; /* parent - watch for prolog/epilog to complete */ close(fd_input); act.sa_handler = pelogalm; sigemptyset(&act.sa_mask); act.sa_flags = 0; sigaction(SIGALRM, &act, &oldact); /* it would be nice if the harvest routine could block for 5 seconds, and if the prolog is not complete in that time, mark job as prolog pending, append prolog child, and continue */ /* main loop should attempt to harvest prolog in non-blocking mode. If unsuccessful after timeout, job should be terminated, and failure reported. If successful, mom should unset prolog pending, and continue with job start sequence. Mom should report job as running while prologpending flag is set. (NOTE: must track per job prolog start time) */ alarm(pe_alarm_time); while (waitpid(child, &waitst, 0) < 0) { if (errno != EINTR) { /* exit loop. 
non-alarm based failure occurred */ run_exit = -3; MOMPrologFailureCount++; break; } if (run_exit == -4) { if (KillSent == FALSE) { MOMPrologTimeoutCount++; /* timeout occurred */ KillSent = TRUE; /* NOTE: prolog/epilog may be locked in KERNEL space and unkillable */ alarm(5); } else { /* cannot kill prolog/epilog, give up */ run_exit = -5; break; } } } /* END while (wait(&waitst) < 0) */ /* epilog/prolog child completed */ #ifdef ENABLE_CSA if ((which == PE_EPILOGUSER) && (!strcmp(pelog, path_epiloguser))) { /* * Add a workload management end record */ if (LOGLEVEL >= 8) { sprintf(log_buffer, "%s calling add_wkm_end from run_pelog() - after user epilog", pjob->ji_qs.ji_jobid); log_err(-1, id, log_buffer); } add_wkm_end(pjob->ji_wattr[(int)JOB_ATR_pagg_id].at_val.at_ll, pjob->ji_qs.ji_un.ji_momt.ji_exitstat, pjob->ji_qs.ji_jobid); } #endif /* ENABLE_CSA */ alarm(0); /* restore the previous handler */ sigaction(SIGALRM, &oldact, 0); if (run_exit == 0) { if (WIFEXITED(waitst)) { run_exit = WEXITSTATUS(waitst); } } } else { /* child - run script */ log_close(0); if (lockfds >= 0) { close(lockfds); lockfds = -1; } net_close(-1); if ((which == PE_PROLOGUSER) || (which == PE_EPILOGUSER) || (which == PE_PROLOGUSERJOB) || which == PE_EPILOGUSERJOB) { if (setgroups( pjob->ji_grpcache->gc_ngroup, (gid_t *)pjob->ji_grpcache->gc_groups) != 0) { snprintf(log_buffer,sizeof(log_buffer), "setgroups() for UID = %lu failed: %s\n", (unsigned long)pjob->ji_qs.ji_un.ji_momt.ji_exuid, strerror(errno)); log_err(errno, id, log_buffer); exit(255); } if (setgid(pjob->ji_qs.ji_un.ji_momt.ji_exgid) != 0) { snprintf(log_buffer,sizeof(log_buffer), "setgid(%lu) for UID = %lu failed: %s\n", (unsigned long)pjob->ji_qs.ji_un.ji_momt.ji_exgid, (unsigned long)pjob->ji_qs.ji_un.ji_momt.ji_exuid, strerror(errno)); log_err(errno, id, log_buffer); exit(255); } if (setuid(pjob->ji_qs.ji_un.ji_momt.ji_exuid) != 0) { snprintf(log_buffer,sizeof(log_buffer), "setuid(%lu) failed: %s\n", (unsigned long)pjob->ji_qs.ji_un.ji_momt.ji_exuid, strerror(errno)); log_err(errno, id, log_buffer); exit(255); } } if (fd_input != 0) { close(0); if (dup(fd_input) == -1) {} close(fd_input); } if (pe_io_type == PE_IO_TYPE_NULL) { /* no output, force to /dev/null */ fds1 = open("/dev/null", O_WRONLY, 0600); fds2 = open("/dev/null", O_WRONLY, 0600); } else if (pe_io_type == PE_IO_TYPE_STD) { /* open job standard out/error */ /* * We need to know if files are joined or not. 
* If they are then open the correct file and duplicate it to the other */ isjoined = is_joined(pjob); switch (isjoined) { case -1: fds2 = open_std_file(pjob, StdErr, O_WRONLY | O_APPEND, pjob->ji_qs.ji_un.ji_momt.ji_exgid); fds1 = dup(fds2); break; case 1: fds1 = open_std_file(pjob, StdOut, O_WRONLY | O_APPEND, pjob->ji_qs.ji_un.ji_momt.ji_exgid); fds2 = dup(fds1); break; default: fds1 = open_std_file(pjob, StdOut, O_WRONLY | O_APPEND, pjob->ji_qs.ji_un.ji_momt.ji_exgid); fds2 = open_std_file(pjob, StdErr, O_WRONLY | O_APPEND, pjob->ji_qs.ji_un.ji_momt.ji_exgid); break; } } if (pe_io_type != PE_IO_TYPE_ASIS) { /* If PE_IO_TYPE_ASIS, leave as is, already open to job */ if (fds1 != 1) { close(1); if (dup(fds1) == -1) {} close(fds1); } if (fds2 != 2) { close(2); if (dup(fds2) == -1) {} close(fds2); } } if ((which == PE_PROLOGUSER) || (which == PE_EPILOGUSER) || (which == PE_PROLOGUSERJOB) || (which == PE_EPILOGUSERJOB)) { if (chdir(pjob->ji_grpcache->gc_homedir) != 0) { /* warn only, no failure */ sprintf(log_buffer, "PBS: chdir to %s failed: %s (running user %s in current directory)", pjob->ji_grpcache->gc_homedir, strerror(errno), which == PE_PROLOGUSER ? "prologue" : "epilogue"); if (write(2, log_buffer, strlen(log_buffer)) == -1) {} fsync(2); } } /* for both prolog and epilog */ if (DEBUGMODE == 1) { fprintf(stderr, "PELOGINFO: script:'%s' jobid:'%s' euser:'%s' egroup:'%s' jobname:'%s' SSID:'%ld' RESC:'%s'\n", pelog, pjob->ji_qs.ji_jobid, pjob->ji_wattr[(int)JOB_ATR_euser].at_val.at_str, pjob->ji_wattr[(int)JOB_ATR_egroup].at_val.at_str, pjob->ji_wattr[(int)JOB_ATR_jobname].at_val.at_str, pjob->ji_wattr[(int)JOB_ATR_session_id].at_val.at_long, resc_to_string(pjob, (int)JOB_ATR_resource, resc_list, sizeof(resc_list))); } arg[0] = pelog; arg[1] = pjob->ji_qs.ji_jobid; arg[2] = pjob->ji_wattr[(int)JOB_ATR_euser].at_val.at_str; arg[3] = pjob->ji_wattr[(int)JOB_ATR_egroup].at_val.at_str; arg[4] = pjob->ji_wattr[(int)JOB_ATR_jobname].at_val.at_str; set_resource_vars(pjob,NULL); /* NOTE: inside child */ if ( which == PE_EPILOG || which == PE_EPILOGUSER || which == PE_EPILOGUSERJOB ) { /* for epilog only */ sprintf(sid, "%ld", pjob->ji_wattr[(int)JOB_ATR_session_id].at_val.at_long); sprintf(exit_stat,"%d", pjob->ji_qs.ji_un.ji_exect.ji_exitstat); arg[5] = sid; arg[6] = resc_to_string(pjob, (int)JOB_ATR_resource, resc_list, sizeof(resc_list)); arg[7] = resc_to_string(pjob, (int)JOB_ATR_resc_used, resc_used, sizeof(resc_used)); arg[8] = pjob->ji_wattr[(int)JOB_ATR_in_queue].at_val.at_str; arg[9] = pjob->ji_wattr[(int)JOB_ATR_account].at_val.at_str; arg[10] = exit_stat; arg[11] = NULL; LastArg = 11; } else if (which == PE_MAGRATHEA) { char *cc = NULL, *c = NULL; setenv("MAGRATHEA_CLUSTER",pjob->ji_wattr[(int)JOB_ATR_jobname].at_val.at_str,1); if ((pjob->ji_wattr[(int)JOB_ATR_cloud_mapping].at_flags & ATR_VFLAG_SET) && (pjob->ji_wattr[(int)JOB_ATR_cloud_mapping].at_val.at_str)) { c = cloud_mom_mapping(pjob->ji_wattr[(int)JOB_ATR_cloud_mapping].at_val.at_str,mom_host,&cc); } if (c) arg[5]=c; else arg[5]=mom_host; setenv("MAGRATHEA_VIRTUAL_HOST",arg[5],1); if (cc) { setenv("MAGRATHEA_VIRTUAL_ALTERNATIVE",cc,1); free(cc); } if (pjob->ji_wattr[(int)JOB_ATR_vlan_id].at_val.at_str != NULL ) { setenv("MAGRATHEA_VLANID",pjob->ji_wattr[(int)JOB_ATR_vlan_id].at_val.at_str,1); } switch (is_cloud_job(pjob)) { case 1: setenv("MAGRATHEA_TYPE","create",1); break; case 2: setenv("MAGRATHEA_TYPE","internal",1); break; default: setenv("MAGRATHEA_TYPE","none",1); break; } arg[6]=(char *)0; LastArg = 6; } else { /* 
prolog */ arg[5] = resc_to_string(pjob, (int)JOB_ATR_resource, resc_list, sizeof(resc_list)); arg[6] = pjob->ji_wattr[(int)JOB_ATR_in_queue].at_val.at_str; arg[7] = pjob->ji_wattr[(int)JOB_ATR_account].at_val.at_str; arg[8] = NULL; LastArg = 8; } for (aindex = 0;aindex < LastArg;aindex++) { if (arg[aindex] == NULL) arg[aindex] = EmptyString; } /* END for (aindex) */ /* * Pass Resource_List.nodes request in environment * to allow pro/epi-logue setup/teardown of system * settings. --pw, 2 Jan 02 * Fixed to use putenv for sysV compatibility. * --troy, 11 jun 03 * */ r = find_resc_entry( &pjob->ji_wattr[(int)JOB_ATR_resource], find_resc_def(svr_resc_def, "nodes", svr_resc_size)); if (r != NULL) { /* setenv("PBS_RESOURCE_NODES",r->rs_value.at_val.at_str,1); */ const char *envname = "PBS_RESOURCE_NODES="; char *envstr; envstr = malloc( (strlen(envname) + strlen(r->rs_value.at_val.at_str) + 1) * sizeof(char)); if (envstr != NULL) { strcpy(envstr,envname); strcat(envstr,r->rs_value.at_val.at_str); /* do _not_ free the string when using putenv */ putenv(envstr); } } /* END if (r != NULL) */ r = find_resc_entry( &pjob->ji_wattr[(int)JOB_ATR_resource], find_resc_def(svr_resc_def, "gres", svr_resc_size)); if (r != NULL) { /* setenv("PBS_RESOURCE_NODES",r->rs_value.at_val.at_str,1); */ const char *envname = "PBS_RESOURCE_GRES="; char *envstr; envstr = malloc( (strlen(envname) + strlen(r->rs_value.at_val.at_str) + 1) * sizeof(char)); if (envstr != NULL) { strcpy(envstr,envname); strcat(envstr,r->rs_value.at_val.at_str); /* do _not_ free the string when using putenv */ putenv(envstr); } } /* END if (r != NULL) */ if (TTmpDirName(pjob, buf)) { const char *envname = "TMPDIR="; char *envstr; envstr = malloc( (strlen(envname) + strlen(buf) + 1) * sizeof(char)); if (envstr != NULL) { strcpy(envstr,envname); strcat(envstr,buf); /* do _not_ free the string when using putenv */ putenv(envstr); } } /* END if (TTmpDirName(pjob,&buf)) */ /* Set PBS_SCHED_HINT */ { char *envname = "PBS_SCHED_HINT"; char *envval; char *envstr; if ((envval = get_job_envvar(pjob, envname)) != NULL) { envstr = malloc((strlen(envname) + strlen(envval) + 2) * sizeof(char)); if (envstr != NULL) { sprintf(envstr,"%s=%s", envname, envval); putenv(envstr); } } } /* Set PBS_NODENUM */ { char *envname = "PBS_NODENUM"; char *envstr; sprintf(buf, "%d", pjob->ji_nodeid); envstr = malloc((strlen(envname) + strlen(buf) + 2) * sizeof(char)); if (envstr != NULL) { sprintf(envstr,"%s=%d", envname, pjob->ji_nodeid); putenv(envstr); } } /* Set PBS_MSHOST */ { char *envname = "PBS_MSHOST"; char *envstr; if ((pjob->ji_vnods[0].vn_host != NULL) && (pjob->ji_vnods[0].vn_host->hn_host != NULL)) { envstr = malloc((strlen(envname) + strlen(pjob->ji_vnods[0].vn_host->hn_host) + 2) * sizeof(char)); if (envstr != NULL) { sprintf(envstr,"%s=%s", envname, pjob->ji_vnods[0].vn_host->hn_host); putenv(envstr); } } } /* Set PBS_NODEFILE */ { char *envname = "PBS_NODEFILE"; char *envstr; if (pjob->ji_flags & MOM_HAS_NODEFILE) { sprintf(buf, "%s/%s", path_aux, pjob->ji_qs.ji_jobid); envstr = malloc((strlen(envname) + strlen(buf) + 2) * sizeof(char)); if (envstr != NULL) { sprintf(envstr,"%s=%s", envname, buf); putenv(envstr); } } } /* Set umask */ if (pjob->ji_wattr[(int)JOB_ATR_umask].at_flags & ATR_VFLAG_SET) { char *buf = calloc(strlen("PBS_UMASK=")+16,1); if (buf != NULL) { sprintf(buf,"PBS_UMASK=%#o",pjob->ji_wattr[(int)JOB_ATR_umask].at_val.at_long); putenv(buf); } } /* Set PBS_O_Workdir */ { char *envname = "PBS_O_WORKDIR"; char *workdir_val; char *envstr; 
workdir_val = get_job_envvar(pjob,envname); if (workdir_val != NULL) { envstr = malloc((strlen(workdir_val) + strlen(envname) + 2) * sizeof(char)); if (envstr != NULL) { sprintf(envstr,"%s=%s", envname, workdir_val); putenv(envstr); } } } /* SET BEOWULF_JOB_MAP */ { struct array_strings *vstrs; int VarIsSet = 0; int j; vstrs = pjob->ji_wattr[(int)JOB_ATR_variables].at_val.at_arst; for (j = 0;j < vstrs->as_usedptr;++j) { if (!strncmp( vstrs->as_string[j], "BEOWULF_JOB_MAP=", strlen("BEOWULF_JOB_MAP="))) { VarIsSet = 1; break; } } if (VarIsSet == 1) { char *envstr; envstr = malloc((strlen(vstrs->as_string[j]) + 1) * sizeof(char)); /* +1 for the terminating NUL */ if (envstr != NULL) { strcpy(envstr,vstrs->as_string[j]); putenv(envstr); } } } execv(pelog,arg); sprintf(log_buffer,"execv of %s failed: %s\n", pelog, strerror(errno)); if (write(2, log_buffer, strlen(log_buffer)) == -1) { /* cannot write message to stderr */ /* NO-OP */ } fsync(2); exit(255); } /* END else () */ switch (run_exit) { case 0: /* SUCCESS */ /* NO-OP */ break; case - 3: pelog_err(pjob, pelog, run_exit, "child wait interrupted"); break; case - 4: pelog_err(pjob, pelog, run_exit, "prolog/epilog timeout occurred, child cleaned up"); break; case - 5: pelog_err(pjob, pelog, run_exit, "prolog/epilog timeout occurred, cannot kill child"); break; default: pelog_err(pjob, pelog, run_exit, "nonzero p/e exit status"); break; } /* END switch (run_exit) */ return(run_exit); } /* END run_pelog() */
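/*
 * The newer run_pelog() above replaces the repeated malloc + sprintf + putenv
 * blocks of this older version with a put_env_var() helper.  Below is a
 * minimal sketch of such a helper; it assumes nothing about the real TORQUE
 * function beyond the name/value arguments seen at its call sites, and the
 * name put_env_var_sketch() is hypothetical.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int put_env_var_sketch(const char *name, const char *value)
  {
  /* putenv() keeps the pointer, so the string must not be freed */
  size_t len    = strlen(name) + strlen(value) + 2;  /* '=' plus NUL */
  char  *envstr = (char *)malloc(len);

  if (envstr == NULL)
    return(-1);

  snprintf(envstr, len, "%s=%s", name, value);

  return(putenv(envstr));
  }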
void setup_pelog_outputs( job *pjob, int pe_io_type, int delete_job, char *specpelog) { int fds1 = 0; int fds2 = 0; if (pe_io_type == PE_IO_TYPE_NULL) { /* no output, force to /dev/null */ fds1 = open("/dev/null", O_WRONLY, 0600); fds2 = open("/dev/null", O_WRONLY, 0600); } else if (pe_io_type == PE_IO_TYPE_STD) { /* open job standard out/error */ /* * We need to know if files are joined or not. * If they are then open the correct file and duplicate it to the other */ int isjoined = is_joined(pjob); switch (isjoined) { case -1: fds2 = open_std_file(pjob, StdErr, O_WRONLY | O_APPEND, pjob->ji_qs.ji_un.ji_momt.ji_exgid); fds1 = (fds2 < 0)?-1:dup(fds2); break; case 1: fds1 = open_std_file(pjob, StdOut, O_WRONLY | O_APPEND, pjob->ji_qs.ji_un.ji_momt.ji_exgid); fds2 = (fds1 < 0)?-1:dup(fds1); break; default: fds1 = open_std_file(pjob, StdOut, O_WRONLY | O_APPEND, pjob->ji_qs.ji_un.ji_momt.ji_exgid); fds2 = open_std_file(pjob, StdErr, O_WRONLY | O_APPEND, pjob->ji_qs.ji_un.ji_momt.ji_exgid); break; } } /* * dupeStdFiles is a flag for those that couldn't open their .OU/.ER files */ int dupeStdFiles = 1; if (!delete_job) { if ((fds1 < 0) || (fds2 < 0)) { if (fds1 >= 0) close(fds1); if (fds2 >= 0) close(fds2); if ((pe_io_type == PE_IO_TYPE_STD) && (strlen(specpelog) == strlen(path_epilogp)) && (strcmp(path_epilogp, specpelog) == 0)) dupeStdFiles = 0; else exit(-1); } } if (pe_io_type != PE_IO_TYPE_ASIS) { /* If PE_IO_TYPE_ASIS, leave as is, already open to job */ /* dup only for those fds1 >= 0 */ if (fds1 != 1) { close(1); if (dupeStdFiles) { if ((fds1 >= 0)&&(dup(fds1) >= 0)) close(fds1); } } if (fds2 != 2) { close(2); if (dupeStdFiles) { if ((fds2 >= 0)&&(dup(fds2) >= 0)) close(fds2); } } } } /* END setup_pelog_outputs() */
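/*
 * Hedged sketch of the tri-state contract that setup_pelog_outputs() (and the
 * run_pelog() variants above) assume from the one-argument is_joined():
 * 1 when stderr is folded into the stdout file ("oe"), -1 when stdout is
 * folded into the stderr file ("eo"), 0 when the streams stay separate.
 * This only illustrates the contract, it is not the shipped function; it
 * assumes the job/attribute definitions from pbs_job.h, and the name
 * is_joined_tristate_sketch() is hypothetical.
 */
#include <string.h>

static int is_joined_tristate_sketch(job *pjob)
  {
  char *spec;

  if (!(pjob->ji_wattr[JOB_ATR_join].at_flags & ATR_VFLAG_SET))
    return(0);

  spec = pjob->ji_wattr[JOB_ATR_join].at_val.at_str;

  if ((spec == NULL) || (spec[0] == '\0') || (spec[1] == '\0'))
    return(0);                       /* "", "o", "e" or "n": nothing joined */

  if ((spec[0] == 'o') && (strchr(spec, 'e') != NULL))
    return(1);                       /* stderr goes to the stdout (.OU) file */

  if ((spec[0] == 'e') && (strchr(spec, 'o') != NULL))
    return(-1);                      /* stdout goes to the stderr (.ER) file */

  return(0);
  }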