Example #1
 /*
 * job_unlink_file - unlink a file, but drop root credentials before
 * doing so, to avoid removing objects that don't belong to the user.
 */
int job_unlink_file(

  job        *pjob,  /* I */
  const char *name)	 /* I */

  {
  int   saved_errno = 0;
  int   result = 0;
  uid_t uid = geteuid();
  gid_t gid = getegid();

  if (uid != 0)
    return unlink(name);

  if ((setegid(pjob->ji_qs.ji_un.ji_momt.ji_exgid) == -1))
    return -1;
  if ((setuid_ext(pjob->ji_qs.ji_un.ji_momt.ji_exuid, TRUE) == -1))
    {
    saved_errno = errno;
    setegid(gid);
    errno = saved_errno;
    return -1;
    }
  result = unlink(name);
  saved_errno = errno;

  setuid_ext(uid, TRUE);
  setegid(gid);

  errno = saved_errno;
  return(result);
  }  /* END job_unlink_file() */
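For reference, a minimal self-contained sketch of the same drop-privileges-before-unlink pattern written with the plain seteuid()/setegid() calls; setuid_ext() above is assumed to be a TORQUE wrapper around them, and unlink_as_user() is an illustrative name, not a function from the sources:

#include <errno.h>
#include <unistd.h>

int unlink_as_user(

  const char *path,  /* I */
  uid_t       uid,   /* I - user to act as */
  gid_t       gid)   /* I */

  {
  uid_t saved_uid = geteuid();
  gid_t saved_gid = getegid();
  int   saved_errno;
  int   result;

  if (saved_uid != 0)
    return(unlink(path));    /* not running as root - nothing to drop */

  if (setegid(gid) == -1)
    return(-1);

  if (seteuid(uid) == -1)
    {
    saved_errno = errno;
    setegid(saved_gid);      /* best-effort restore */
    errno = saved_errno;
    return(-1);
    }

  result = unlink(path);
  saved_errno = errno;

  seteuid(saved_uid);        /* back to root */
  setegid(saved_gid);

  errno = saved_errno;
  return(result);
  }  /* END unlink_as_user() */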
Example #2
int undo_set_euid_egid(

  int         which,
  uid_t       real_uid,
  gid_t       real_gid,
  int         num_gids,
  gid_t      *real_gids,
  const char *id)
    
  {
  if ((which == PE_PROLOGUSER) || 
      (which == PE_EPILOGUSER) || 
      (which == PE_PROLOGUSERJOB) || 
      (which == PE_EPILOGUSERJOB))
    {
    if ((setuid_ext(real_uid, TRUE) != 0) ||
        (setegid(real_gid) != 0) ||
        (setgroups(num_gids,real_gids) != 0))
      {
      log_err(errno,id, (char *)"Couldn't revert back to the root user - IMMINENT FAILURE but will try to continue\n");
      }

    return(-1);
    }

  return(0);
  } /* END undo_set_euid_egid() */
Example #3
int run_pelog(

  int   which,      /* I (one of PE_*) */
  char *specpelog,  /* I - script path */
  job  *pjob,       /* I - associated job */
  int   pe_io_type, /* I - io type */
  int   deletejob)  /* I - set when the job is being deleted (purge -p) */

  {
  struct sigaction  act;
  struct sigaction  oldact;
  char             *arg[12];
  int               fds1 = 0;
  int               fds2 = 0;
  int               fd_input;
  char              resc_list[2048];
  char              resc_used[2048];

  struct stat       sbuf;
  char              sid[20];
  char              exit_stat[11];
  int               waitst;
  int               isjoined;  /* boolean */
  char              buf[MAXPATHLEN + 1024];
  char              pelog[MAXPATHLEN + 1024];

  uid_t             real_uid;
  gid_t            *real_gids = NULL;
  gid_t             real_gid;
  int               num_gids;

  int               jobtypespecified = 0;

  resource         *r;

  char             *EmptyString = (char *)"";

  int               LastArg;
  int               aindex;

  int               rc;

  char             *ptr;

  int               moabenvcnt = 14;  /* # of entries in moabenvs */
  static char      *moabenvs[] = {
      (char *)"MOAB_NODELIST",
      (char *)"MOAB_JOBID",
      (char *)"MOAB_JOBNAME",
      (char *)"MOAB_USER",
      (char *)"MOAB_GROUP",
      (char *)"MOAB_CLASS",
      (char *)"MOAB_TASKMAP",
      (char *)"MOAB_QOS",
      (char *)"MOAB_PARTITION",
      (char *)"MOAB_PROCCOUNT",
      (char *)"MOAB_NODECOUNT",
      (char *)"MOAB_MACHINE",
      (char *)"MOAB_JOBARRAYINDEX",
      (char *)"MOAB_JOBARRAYRANGE"
      };

  if ((pjob == NULL) || (specpelog == NULL) || (specpelog[0] == '\0'))
    {
    return(0);
    }

  ptr = pjob->ji_wattr[JOB_ATR_jobtype].at_val.at_str;

  if (ptr != NULL)
    {
    jobtypespecified = 1;

    snprintf(pelog,sizeof(pelog),"%s.%s",
      specpelog,
      ptr);
    }
  else
    {
    snprintf(pelog, sizeof(pelog), "%s", specpelog);
    }
    
  real_uid = getuid();
  real_gid = getgid();
  if ((num_gids = getgroups(0, real_gids)) < 0)
    {
    log_err(errno, __func__, (char *)"getgroups failed\n");
    
    return(-1);
    }

  /* to support root squashing, become the user before performing file checks */
  if ((which == PE_PROLOGUSER) || 
      (which == PE_EPILOGUSER) || 
      (which == PE_PROLOGUSERJOB) || 
      (which == PE_EPILOGUSERJOB))
    {

    real_gids = (gid_t *)calloc(num_gids, sizeof(gid_t));
    
    if (real_gids == NULL)
      {
      log_err(ENOMEM, __func__, (char *)"Cannot allocate memory! FAILURE\n");
      
      return(-1);
      }
    
    if (getgroups(num_gids,real_gids) < 0)
      {
      log_err(errno, __func__, (char *)"getgroups failed\n");
      free(real_gids);
      
      return(-1);
      }
    
    /* pjob->ji_grpcache may not be set (e.g. when LDAP is in use but not
       configured), and its allocation may have failed, so make sure it is
       not NULL before dereferencing it */
    if (pjob->ji_grpcache != NULL)
      {
      if (setgroups(
            pjob->ji_grpcache->gc_ngroup,
            (gid_t *)pjob->ji_grpcache->gc_groups) != 0)
        {
        snprintf(log_buffer,sizeof(log_buffer),
          "setgroups() for UID = %lu failed: %s\n",
          (unsigned long)pjob->ji_qs.ji_un.ji_momt.ji_exuid,
          strerror(errno));
      
        log_err(errno, __func__, log_buffer);
      
        undo_set_euid_egid(which,real_uid,real_gid,num_gids,real_gids,__func__);
        free(real_gids);
      
        return(-1);
        }
      }
    else
      {
      sprintf(log_buffer, "pjob->ji_grpcache is null. check_pwd likely failed.");
      log_err(-1, __func__, log_buffer);
      undo_set_euid_egid(which,real_uid,real_gid,num_gids,real_gids,__func__);
      free(real_gids);
      return(-1);
      }
    
    if (setegid(pjob->ji_qs.ji_un.ji_momt.ji_exgid) != 0)
      {
      snprintf(log_buffer,sizeof(log_buffer),
        "setegid(%lu) for UID = %lu failed: %s\n",
        (unsigned long)pjob->ji_qs.ji_un.ji_momt.ji_exgid,
        (unsigned long)pjob->ji_qs.ji_un.ji_momt.ji_exuid,
        strerror(errno));
      
      log_err(errno, __func__, log_buffer);
      
      undo_set_euid_egid(which,real_uid,real_gid,num_gids,real_gids,__func__);
      free(real_gids);
      
      return(-1);
      }
    
    if (setuid_ext(pjob->ji_qs.ji_un.ji_momt.ji_exuid, TRUE) != 0)
      {
      snprintf(log_buffer,sizeof(log_buffer),
        "seteuid(%lu) failed: %s\n",
        (unsigned long)pjob->ji_qs.ji_un.ji_momt.ji_exuid,
        strerror(errno));
      
      log_err(errno, __func__, log_buffer);
      
      undo_set_euid_egid(which,real_uid,real_gid,num_gids,real_gids,__func__);
      free(real_gids);

      return(-1);
      }
    }

  rc = stat(pelog,&sbuf);

  if ((rc == -1) && (jobtypespecified == 1))
    {
    snprintf(pelog, sizeof(pelog), "%s", specpelog);

    rc = stat(pelog,&sbuf);
    }

  if (rc == -1)
    {
    if (errno == ENOENT || errno == EBADF)
      {
      /* epilog/prolog script does not exist */

      if (LOGLEVEL >= 5)
        {
        static char tmpBuf[1024];

        sprintf(log_buffer, "%s script '%s' for job %s does not exist (cwd: %s,pid: %d)",
          PPEType[which],
          (pelog[0] != '\0') ? pelog : "NULL",
          pjob->ji_qs.ji_jobid,
          getcwd(tmpBuf, sizeof(tmpBuf)),
          getpid());

        log_record(PBSEVENT_SYSTEM, 0, __func__, log_buffer);
        }

#ifdef ENABLE_CSA
      if ((which == PE_EPILOGUSER) && (!strcmp(pelog, path_epiloguser)))
        {
        /*
          * Add a workload management end record
        */
        if (LOGLEVEL >= 8)
          {
          sprintf(log_buffer, "%s calling add_wkm_end from run_pelog() - no user epilog",
            pjob->ji_qs.ji_jobid);

          log_err(-1, __func__, log_buffer);
          }

        add_wkm_end(pjob->ji_wattr[JOB_ATR_pagg_id].at_val.at_ll,
            pjob->ji_qs.ji_un.ji_momt.ji_exitstat, pjob->ji_qs.ji_jobid);
        }

#endif /* ENABLE_CSA */

      undo_set_euid_egid(which,real_uid,real_gid,num_gids,real_gids,__func__);
      free(real_gids);

      return(0);
      }
      
    undo_set_euid_egid(which,real_uid,real_gid,num_gids,real_gids,__func__);
    free(real_gids);

    return(pelog_err(pjob,pelog,errno,(char *)"cannot stat"));
    }

  if (LOGLEVEL >= 5)
    {
    sprintf(log_buffer,"running %s script '%s' for job %s",
      PPEType[which],
      (pelog[0] != '\0') ? pelog : "NULL",
      pjob->ji_qs.ji_jobid);

    log_ext(-1, __func__, log_buffer, LOG_DEBUG);  /* not actually an error--but informational */
    }

  /* script must be owned by root, be a regular file, be readable and
   * executable by the user, and not writable by group or other */

  if (reduceprologchecks == TRUE)
    {
    if ((!S_ISREG(sbuf.st_mode)) ||
        (!(sbuf.st_mode & (S_IXUSR | S_IXGRP | S_IXOTH))))
      {
      undo_set_euid_egid(which,real_uid,real_gid,num_gids,real_gids,__func__);
      free(real_gids);
      return(pelog_err(pjob,pelog,-1, (char *)"permission Error"));
      }
    }
  else
    {
    if (which == PE_PROLOGUSERJOB || which == PE_EPILOGUSERJOB)
      {
      if ((sbuf.st_uid != pjob->ji_qs.ji_un.ji_momt.ji_exuid) || 
          (!S_ISREG(sbuf.st_mode)) ||
          ((sbuf.st_mode & (S_IRUSR | S_IXUSR)) != (S_IRUSR | S_IXUSR)) ||
          (sbuf.st_mode & (S_IWGRP | S_IWOTH)))
        {
        undo_set_euid_egid(which,real_uid,real_gid,num_gids,real_gids,__func__);
        free(real_gids);
        return(pelog_err(pjob,pelog,-1, (char *)"permission Error"));
        }
      }
    else if ((sbuf.st_uid != 0) ||
        (!S_ISREG(sbuf.st_mode)) ||
        ((sbuf.st_mode & (S_IRUSR | S_IXUSR)) != (S_IRUSR | S_IXUSR)) ||
        (sbuf.st_mode & (S_IWGRP | S_IWOTH)))
      {
      undo_set_euid_egid(which,real_uid,real_gid,num_gids,real_gids,__func__);
      free(real_gids);
      return(pelog_err(pjob,pelog,-1, (char *)"permission Error"));
      }
    
    if ((which == PE_PROLOGUSER) || (which == PE_EPILOGUSER))
      {
      /* script must also be readable and executable by others */
      
      if ((sbuf.st_mode & (S_IROTH | S_IXOTH)) != (S_IROTH | S_IXOTH))
        {
        undo_set_euid_egid(which,real_uid,real_gid,num_gids,real_gids,__func__);
        free(real_gids);
        return(pelog_err(pjob, pelog, -1,  (char *)"permission Error"));
        }
      }
    } /* END !reduceprologchecks */

  fd_input = pe_input(pjob->ji_qs.ji_jobid);

  if (fd_input < 0)
    {
    undo_set_euid_egid(which,real_uid,real_gid,num_gids,real_gids,__func__);
    free(real_gids);
    return(pelog_err(pjob, pelog, -2,  (char *)"no pro/epilogue input file"));
    }

  run_exit = 0;

  child = fork();

  if (child > 0)
    {
    int KillSent = FALSE;

    /* parent - watch for prolog/epilog to complete */

    close(fd_input);

    /* switch back to root if necessary */
    undo_set_euid_egid(which,real_uid,real_gid,num_gids,real_gids,__func__);
    free(real_gids);

    act.sa_handler = pelogalm;

    sigemptyset(&act.sa_mask);

    act.sa_flags = 0;

    sigaction(SIGALRM, &act, &oldact);

    /* it would be nice if the harvest routine could block for 5 seconds,
       and if the prolog is not complete in that time, mark job as prolog
       pending, append prolog child, and continue */

    /* main loop should attempt to harvest prolog in non-blocking mode.
       If unsuccessful after timeout, job should be terminated, and failure
       reported.  If successful, mom should unset prolog pending, and
       continue with job start sequence.  Mom should report job as running
       while prologpending flag is set.  (NOTE:  must track per job prolog
       start time)
    */

    alarm(pe_alarm_time);

    while (waitpid(child, &waitst, 0) < 0)
      {
      if (errno != EINTR)
        {
        /* exit loop. non-alarm based failure occurred */

        run_exit = -3;

        MOMPrologFailureCount++;

        break;
        }

      if (run_exit == -4)
        {
        if (KillSent == FALSE)
          {
          MOMPrologTimeoutCount++;

          /* timeout occurred */

          KillSent = TRUE;

          /* NOTE:  prolog/epilog may be locked in KERNEL space and unkillable */

          alarm(5);
          }
        else
          {
          /* cannot kill prolog/epilog, give up */

          run_exit = -5;

          break;
          }
        }
      }    /* END while (wait(&waitst) < 0) */

    /* epilog/prolog child completed */
#ifdef ENABLE_CSA
    if ((which == PE_EPILOGUSER) && (!strcmp(pelog, path_epiloguser)))
      {
      /*
       * Add a workload management end record
      */
      if (LOGLEVEL >= 8)
        {
        sprintf(log_buffer, "%s calling add_wkm_end from run_pelog() - after user epilog",
                pjob->ji_qs.ji_jobid);

        log_err(-1, __func__, log_buffer);
        }

      add_wkm_end(pjob->ji_wattr[JOB_ATR_pagg_id].at_val.at_ll,
          pjob->ji_qs.ji_un.ji_momt.ji_exitstat, pjob->ji_qs.ji_jobid);
      }

#endif /* ENABLE_CSA */

    alarm(0);

    /* restore the previous handler */

    sigaction(SIGALRM, &oldact, 0);

    if (run_exit == 0)
      {
      if (WIFEXITED(waitst))
        {
        run_exit = WEXITSTATUS(waitst);
        }
      }
    }
  else
    {
    /* child - run script */

    log_close(0);

    if (lockfds >= 0)
      {
      close(lockfds);

      lockfds = -1;
      }

    net_close(-1);

    if (fd_input != 0)
      {
      close(0);
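      /* dup() returns the lowest free descriptor, which is the fd 0 just closed,
         so stdin is re-pointed at the pelog input file */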

      if (dup(fd_input) == -1) {}

      close(fd_input);
      }

    if (pe_io_type == PE_IO_TYPE_NULL)
      {
      /* no output, force to /dev/null */

      fds1 = open("/dev/null", O_WRONLY, 0600);
      fds2 = open("/dev/null", O_WRONLY, 0600);
      }
    else if (pe_io_type == PE_IO_TYPE_STD)
      {
      /* open job standard out/error */

      /*
       * We need to know if files are joined or not.
       * If they are then open the correct file and duplicate it to the other
      */

      isjoined = is_joined(pjob);

      switch (isjoined)
        {
        case -1:

          fds2 = open_std_file(pjob, StdErr, O_WRONLY | O_APPEND,
                               pjob->ji_qs.ji_un.ji_momt.ji_exgid);

          fds1 = (fds2 < 0)?-1:dup(fds2);

          break;

        case 1:

          fds1 = open_std_file(pjob, StdOut, O_WRONLY | O_APPEND,
                               pjob->ji_qs.ji_un.ji_momt.ji_exgid);

          fds2 = (fds1 < 0)?-1:dup(fds1);

          break;

        default:

          fds1 = open_std_file(pjob, StdOut, O_WRONLY | O_APPEND,
                               pjob->ji_qs.ji_un.ji_momt.ji_exgid);

          fds2 = open_std_file(pjob, StdErr, O_WRONLY | O_APPEND,
                               pjob->ji_qs.ji_un.ji_momt.ji_exgid);
          break;
        }
      }

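    /* unless the job is being purged (deletejob), failing to open the output files is fatal for the pelog child */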
    if (!deletejob)
      if ((fds1 < 0) ||
          (fds2 < 0))
        {
        if (fds1 >= 0)
          close(fds1);
        if (fds2 >= 0)
          close(fds2);

        exit(-1);
        }

    if (pe_io_type != PE_IO_TYPE_ASIS)
      {
      /* If PE_IO_TYPE_ASIS, leave as is, already open to job */

      if (fds1 != 1)
        {
        close(1);

        if (dup(fds1) >= 0)
          {
          close(fds1);
          }
        }

      if (fds2 != 2)
        {
        close(2);

        if (dup(fds2) >= 0)
          {
          close(fds2);
          }
        }
      }

    if ((which == PE_PROLOGUSER) || 
        (which == PE_EPILOGUSER) || 
        (which == PE_PROLOGUSERJOB) || 
        (which == PE_EPILOGUSERJOB))
      {
      if (chdir(pjob->ji_grpcache->gc_homedir) != 0)
        {
        /* warn only, no failure */

        sprintf(log_buffer,
          "PBS: chdir to %s failed: %s (running user %s in current directory)",
          pjob->ji_grpcache->gc_homedir,
          strerror(errno),
          which == PE_PROLOGUSER ? "prologue" : "epilogue");

        if (write_ac_socket(2, log_buffer, strlen(log_buffer)) == -1) {}

        fsync(2);
        }
      }

    /* for both prolog and epilog */

    if (DEBUGMODE == 1)
      {
      fprintf(stderr, "PELOGINFO:  script:'%s'  jobid:'%s'  euser:'******'  egroup:'%s'  jobname:'%s' SSID:'%ld'  RESC:'%s'\n",
              pelog,
              pjob->ji_qs.ji_jobid,
              pjob->ji_wattr[JOB_ATR_euser].at_val.at_str,
              pjob->ji_wattr[JOB_ATR_egroup].at_val.at_str,
              pjob->ji_wattr[JOB_ATR_jobname].at_val.at_str,
              pjob->ji_wattr[JOB_ATR_session_id].at_val.at_long,
              resc_to_string(pjob, JOB_ATR_resource, resc_list, sizeof(resc_list)));
      }

    arg[0] = pelog;

    arg[1] = pjob->ji_qs.ji_jobid;
    arg[2] = pjob->ji_wattr[JOB_ATR_euser].at_val.at_str;
    arg[3] = pjob->ji_wattr[JOB_ATR_egroup].at_val.at_str;
    arg[4] = pjob->ji_wattr[JOB_ATR_jobname].at_val.at_str;

    /* NOTE:  inside child */

    if ((which == PE_EPILOG) || 
        (which == PE_EPILOGUSER) || 
        (which == PE_EPILOGUSERJOB))
      {
      /* for epilog only */

      sprintf(sid, "%ld",
              pjob->ji_wattr[JOB_ATR_session_id].at_val.at_long);
      sprintf(exit_stat,"%d",
              pjob->ji_qs.ji_un.ji_momt.ji_exitstat);

      arg[5] = sid;
      arg[6] = resc_to_string(pjob, JOB_ATR_resource, resc_list, sizeof(resc_list));
      arg[7] = resc_to_string(pjob, JOB_ATR_resc_used, resc_used, sizeof(resc_used));
      arg[8] = pjob->ji_wattr[JOB_ATR_in_queue].at_val.at_str;
      arg[9] = pjob->ji_wattr[JOB_ATR_account].at_val.at_str;
      arg[10] = exit_stat;
      arg[11] = NULL;

      LastArg = 11;
      }
    else
      {
      /* prolog */

      arg[5] = resc_to_string(pjob, JOB_ATR_resource, resc_list, sizeof(resc_list));
      arg[6] = pjob->ji_wattr[JOB_ATR_in_queue].at_val.at_str;
      arg[7] = pjob->ji_wattr[JOB_ATR_account].at_val.at_str;
      arg[8] = NULL;

      LastArg = 8;
      }

    for (aindex = 0;aindex < LastArg;aindex++)
      {
      if (arg[aindex] == NULL)
        arg[aindex] = EmptyString;
      }  /* END for (aindex) */

    /*
     * Pass Resource_List.nodes request in environment
     * to allow pro/epi-logue setup/teardown of system
     * settings.  --pw, 2 Jan 02
     * Fixed to use putenv for sysV compatibility.
     *  --troy, 11 jun 03
     *
     */

    r = find_resc_entry(
          &pjob->ji_wattr[JOB_ATR_resource],
          find_resc_def(svr_resc_def, (char *)"nodes", svr_resc_size));

    if (r != NULL)
      {
      /* setenv("PBS_RESOURCE_NODES",r->rs_value.at_val.at_str,1); */

      const char *ppn_str = "ppn=";
      int num_nodes = 1;
      int num_ppn = 1;

      /* PBS_RESOURCE_NODES */
      put_env_var("PBS_RESOURCE_NODES", r->rs_value.at_val.at_str);

      /* PBS_NUM_NODES */
      num_nodes = strtol(r->rs_value.at_val.at_str, NULL, 10);

      /* 
       * InitUserEnv() also calculates num_nodes and num_ppn the same way
       */
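      /* worked example: for nodes=2:ppn=4+3:ppn=4 the parsing below yields
         num_nodes = 5 (2 + 3) and num_ppn = 4 (the first ppn= value found) */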
      if (num_nodes != 0)
        {
        char *tmp;
        char *other_reqs;

        /* get the ppn */
        if ((tmp = strstr(r->rs_value.at_val.at_str,ppn_str)) != NULL)
          {
          tmp += strlen(ppn_str);

          num_ppn = strtol(tmp, NULL, 10);
          }

        other_reqs = r->rs_value.at_val.at_str;

        while ((other_reqs = strchr(other_reqs, '+')) != NULL)
          {
          other_reqs += 1;
          num_nodes += strtol(other_reqs, &other_reqs, 10);
          }
        }

      sprintf(buf, "%d", num_nodes);
      put_env_var("PBS_NUM_NODES", buf);

      /* PBS_NUM_PPN */
      sprintf(buf, "%d", num_ppn);
      put_env_var("PBS_NUM_PPN", buf);

      /* PBS_NP */
      sprintf(buf, "%d", pjob->ji_numvnod);
      put_env_var("PBS_NP", buf);
      }  /* END if (r != NULL) */

    r = find_resc_entry(
          &pjob->ji_wattr[JOB_ATR_resource],
          find_resc_def(svr_resc_def, (char *)"gres", svr_resc_size));

    if (r != NULL)
      {
      /* setenv("PBS_RESOURCE_NODES",r->rs_value.at_val.at_str,1); */
      put_env_var("PBS_RESOURCE_GRES", r->rs_value.at_val.at_str);
      }

    if (TTmpDirName(pjob, buf, sizeof(buf)))
      {
      put_env_var("TMPDIR", buf);
      }

    /* Set PBS_SCHED_HINT */

    {
    char *envname = (char *)"PBS_SCHED_HINT";
    char *envval;

    if ((envval = get_job_envvar(pjob, envname)) != NULL)
      {
      put_env_var("PBS_SCHED_HINT", envval);
      }
    }

    /* Set PBS_NODENUM */

    sprintf(buf, "%d",
      pjob->ji_nodeid);
    put_env_var("PBS_NODENUM", buf);

    /* Set PBS_MSHOST */

    put_env_var("PBS_MSHOST", pjob->ji_vnods[0].vn_host->hn_host);

    /* Set PBS_NODEFILE */

    if (pjob->ji_flags & MOM_HAS_NODEFILE)
      {
      sprintf(buf, "%s/%s",
        path_aux,
        pjob->ji_qs.ji_jobid);
      put_env_var("PBS_NODEFILE", buf);
      }

    /* Set PBS_O_WORKDIR */
    {
    char *workdir_val;

    workdir_val = get_job_envvar(pjob,"PBS_O_WORKDIR");
    if (workdir_val != NULL)
      {
      put_env_var("PBS_O_WORKDIR", workdir_val);
      }
    }

    /* SET BEOWULF_JOB_MAP */

    {

    struct array_strings *vstrs;

    int VarIsSet = 0;
    int j;

    vstrs = pjob->ji_wattr[JOB_ATR_variables].at_val.at_arst;

    for (j = 0;j < vstrs->as_usedptr;++j)
      {
      if (!strncmp(
            vstrs->as_string[j],
            "BEOWULF_JOB_MAP=",
            strlen("BEOWULF_JOB_MAP=")))
        {
        VarIsSet = 1;

        break;
        }
      }

    if (VarIsSet == 1)
      {
      char *val = strchr(vstrs->as_string[j], '=');

      if (val != NULL)
        put_env_var("BEOWULF_JOB_MAP", val+1);
      }
    }

  /* Set some Moab env variables if they exist */

  if ((which == PE_PROLOG) || (which == PE_EPILOG))
    {
    char *tmp_val;

    for (aindex=0;aindex<moabenvcnt;aindex++)
      {
      tmp_val = get_job_envvar(pjob,moabenvs[aindex]);
      if (tmp_val != NULL)
        {
        put_env_var(moabenvs[aindex], tmp_val);
        }
      }
    }

  /*
   * if we want to run as user then we need to reset real user permissions
   * since it seems that some OSs use real not effective user id when execv'ing
   */

  if ((which == PE_PROLOGUSER) || 
      (which == PE_EPILOGUSER) || 
      (which == PE_PROLOGUSERJOB) || 
      (which == PE_EPILOGUSERJOB))
    {
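    /* presumably restore the daemon's own effective ids (root) first so the
       real-id setgid()/setuid() calls below are permitted */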
    setuid_ext(pbsuser, TRUE);
    setegid(pbsgroup);

    if (setgid(pjob->ji_qs.ji_un.ji_momt.ji_exgid) != 0)
      {
      snprintf(log_buffer,sizeof(log_buffer),
        "setgid(%lu) for UID = %lu failed: %s\n",
        (unsigned long)pjob->ji_qs.ji_un.ji_momt.ji_exgid,
        (unsigned long)pjob->ji_qs.ji_un.ji_momt.ji_exuid,
        strerror(errno));
      
      log_err(errno, __func__, log_buffer);
     
      exit(-1);
      }
    
    if (setuid_ext(pjob->ji_qs.ji_un.ji_momt.ji_exuid, FALSE) != 0)
      {
      snprintf(log_buffer,sizeof(log_buffer),
        "setuid(%lu) failed: %s\n",
        (unsigned long)pjob->ji_qs.ji_un.ji_momt.ji_exuid,
        strerror(errno));
      
      log_err(errno, __func__, log_buffer);
     
      exit(-1);
      }
    }

    execv(pelog,arg);

    sprintf(log_buffer,"execv of %s failed: %s\n",
      pelog,
      strerror(errno));

    if (write_ac_socket(2, log_buffer, strlen(log_buffer)) == -1)
      {
      /* cannot write message to stderr */

      /* NO-OP */
      }

    fsync(2);

    exit(255);
    }  /* END else () */

  switch (run_exit)
    {
    case 0:

      /* SUCCESS */

      /* NO-OP */

      break;

    case -3:

      pelog_err(pjob, pelog, run_exit,  (char *)"child wait interrupted");

      break;

    case -4:

      pelog_err(pjob, pelog, run_exit,  (char *)"prolog/epilog timeout occurred, child cleaned up");

      break;

    case -5:

      pelog_err(pjob, pelog, run_exit, (char *) "prolog/epilog timeout occurred, cannot kill child");

      break;

    default:

      pelog_err(pjob, pelog, run_exit,  (char *)"nonzero p/e exit status");

      break;
    }  /* END switch (run_exit) */

  return(run_exit);
  }  /* END run_pelog() */
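The parent branch above relies on SIGALRM interrupting waitpid() (sa_flags = 0, i.e. no SA_RESTART) to implement the timeout. A stripped-down, self-contained sketch of that pattern; the names wait_with_timeout(), on_alarm() and timed_out are illustrative, and the original's re-arming of a short alarm for an unkillable child is omitted:

#include <errno.h>
#include <signal.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

static volatile sig_atomic_t timed_out = 0;

static void on_alarm(int sig)
  {
  (void)sig;
  timed_out = 1;  /* waitpid() below returns -1 with errno == EINTR */
  }

/* wait for child, killing it if it runs longer than 'seconds';
 * returns 0 on success (status filled in), -1 on failure */
int wait_with_timeout(pid_t child, unsigned int seconds, int *status)
  {
  struct sigaction act;
  struct sigaction oldact;
  int              rc = 0;

  act.sa_handler = on_alarm;
  sigemptyset(&act.sa_mask);
  act.sa_flags = 0;          /* no SA_RESTART - the alarm must interrupt waitpid() */

  sigaction(SIGALRM, &act, &oldact);
  alarm(seconds);

  while (waitpid(child, status, 0) < 0)
    {
    if (errno != EINTR)
      {
      rc = -1;               /* real failure, not the alarm */
      break;
      }

    if (timed_out)
      {
      timed_out = 0;
      kill(child, SIGKILL);  /* timeout - give up on the child */
      }
    }

  alarm(0);                  /* cancel any pending alarm */
  sigaction(SIGALRM, &oldact, NULL);

  return(rc);
  }  /* END wait_with_timeout() */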
Example #4
void *delete_job_files(

  void *vp)

  {
  job_file_delete_info *jfdi = (job_file_delete_info *)vp;
  char                  namebuf[MAXPATHLEN];
  int                   rc = 0;

  if (jfdi->has_temp_dir == TRUE)
    {
    if (tmpdir_basename[0] == '/')
      {
      snprintf(namebuf, sizeof(namebuf), "%s/%s", tmpdir_basename, jfdi->jobid);
      sprintf(log_buffer, "removing transient job directory %s",
        namebuf);

      log_record(PBSEVENT_DEBUG,PBS_EVENTCLASS_JOB,jfdi->jobid,log_buffer);

      if ((setegid(jfdi->gid) == -1) ||
          (setuid_ext(jfdi->uid, TRUE) == -1))
        {
        /* FAILURE */
        rc = -1;
        }
      else
        {
        rc = remtree(namebuf);
        
        setuid_ext(pbsuser, TRUE);
        setegid(pbsgroup);
        }
      
      if ((rc != 0) && 
          (LOGLEVEL >= 5))
        {
        sprintf(log_buffer,
          "recursive remove of job transient tmpdir %s failed",
          namebuf);
        
        log_err(errno, "recursive (r)rmdir", log_buffer);
        }
      }
    } /* END code to remove temp dir */

#ifdef PENABLE_LINUX26_CPUSETS
  /* Delete the cpuset for the job. */
  delete_cpuset(jfdi->jobid);
#endif /* PENABLE_LINUX26_CPUSETS */

  /* delete the node file and gpu file */
  sprintf(namebuf,"%s/%s", path_aux, jfdi->jobid);
  unlink(namebuf);
  
  sprintf(namebuf, "%s/%sgpu", path_aux, jfdi->jobid);
  unlink(namebuf);
  
  sprintf(namebuf, "%s/%smic", path_aux, jfdi->jobid);
  unlink(namebuf);

  /* delete script file */
  if (multi_mom)
    {
    snprintf(namebuf, sizeof(namebuf), "%s%s%d%s",
      path_jobs,
      jfdi->prefix,
      pbs_rm_port,
      JOB_SCRIPT_SUFFIX);
    }
  else
    {
    snprintf(namebuf, sizeof(namebuf), "%s%s%s",
      path_jobs,
      jfdi->prefix,
      JOB_SCRIPT_SUFFIX);
    }

  if (unlink(namebuf) < 0)
    {
    if (errno != ENOENT)
      log_err(errno,__func__,msg_err_purgejob);
    }
  else
    {
    snprintf(log_buffer,sizeof(log_buffer),"removed job script");
    log_record(PBSEVENT_DEBUG,PBS_EVENTCLASS_JOB,jfdi->jobid,log_buffer);
    }

  /* delete job task directory */
  if (multi_mom)
    {
    snprintf(namebuf,sizeof(namebuf),"%s%s%d%s",
      path_jobs,
      jfdi->prefix,
      pbs_rm_port,
      JOB_TASKDIR_SUFFIX);
    }
  else
    {
    snprintf(namebuf,sizeof(namebuf),"%s%s%s",
      path_jobs,
      jfdi->prefix,
      JOB_TASKDIR_SUFFIX);
    }

  remtree(namebuf);

  mom_checkpoint_delete_files(jfdi);

  /* delete job file */
  if (multi_mom)
    {
    snprintf(namebuf,sizeof(namebuf),"%s%s%d%s",
      path_jobs,
      jfdi->prefix,
      pbs_rm_port,
      JOB_FILE_SUFFIX);
    }
  else
    {
    snprintf(namebuf,sizeof(namebuf),"%s%s%s",
      path_jobs,
      jfdi->prefix,
      JOB_FILE_SUFFIX);
    }

  if (unlink(namebuf) < 0)
    {
    if (errno != ENOENT)
      log_err(errno,__func__,msg_err_purgejob);
    }
  else if (LOGLEVEL >= 6)
    {
    snprintf(log_buffer,sizeof(log_buffer),"remove job file");
    log_record(PBSEVENT_DEBUG,PBS_EVENTCLASS_JOB,jfdi->jobid,log_buffer);
    }

  free(jfdi);

  return(NULL);
  } /* END delete_job_files() */
Example #5
int run_pelog(

  int   which,      /* I (one of PE_*) */
  char *specpelog,  /* I - script path */
  job  *pjob,       /* I - associated job */
  int   pe_io_type, /* I - io type */
  int   delete_job)  /* I - set when the job is being deleted (purge -p) */

  {
  int               fd_input;

  struct stat       sbuf;
  char              pelog[MAXPATHLEN + 1024];

  uid_t             real_uid;
  gid_t            *real_gids = NULL;
  gid_t             real_gid;
  int               num_gids;

  bool              jobtypespecified = false;

  int               rc;

  char             *ptr;

  int               pipes[2];
  int               kid_read;
  int               kid_write;
  int               parent_read;
  int               parent_write;

  if ((pjob == NULL) ||
      (specpelog == NULL) ||
      (specpelog[0] == '\0'))
    {
    return(0);
    }

  ptr = pjob->ji_wattr[JOB_ATR_jobtype].at_val.at_str;

  if (ptr != NULL)
    {
    jobtypespecified = true;

    snprintf(pelog,sizeof(pelog),"%s.%s",
      specpelog,
      ptr);
    }
  else
    {
    snprintf(pelog, sizeof(pelog), "%s", specpelog);
    }
    
  real_uid = getuid();
  real_gid = getgid();
  if ((num_gids = getgroups(0, real_gids)) < 0)
    {
    log_err(errno, __func__, (char *)"getgroups failed\n");
    
    return(-1);
    }

  /* to support root squashing, become the user before performing file checks */
  if ((which == PE_PROLOGUSER) || 
      (which == PE_EPILOGUSER) || 
      (which == PE_PROLOGUSERJOB) || 
      (which == PE_EPILOGUSERJOB))
    {

    real_gids = (gid_t *)calloc(num_gids, sizeof(gid_t));
    
    if (real_gids == NULL)
      {
      log_err(ENOMEM, __func__, (char *)"Cannot allocate memory! FAILURE\n");
      
      return(-1);
      }
    
    if (getgroups(num_gids,real_gids) < 0)
      {
      log_err(errno, __func__, (char *)"getgroups failed\n");
      free(real_gids);
      
      return(-1);
      }
    
    /* pjob->ji_grpcache may not be set (e.g. when LDAP is in use but not
       configured), and its allocation may have failed, so make sure it is
       not NULL before dereferencing it */
    if (pjob->ji_grpcache != NULL)
      {
      if (setgroups(
            pjob->ji_grpcache->gc_ngroup,
            (gid_t *)pjob->ji_grpcache->gc_groups) != 0)
        {
        snprintf(log_buffer,sizeof(log_buffer),
          "setgroups() for UID = %lu failed: %s\n",
          (unsigned long)pjob->ji_qs.ji_un.ji_momt.ji_exuid,
          strerror(errno));
      
        log_err(errno, __func__, log_buffer);
      
        undo_set_euid_egid(which,real_uid,real_gid,num_gids,real_gids,__func__);
        free(real_gids);
      
        return(-1);
        }
      }
    else
      {
      sprintf(log_buffer, "pjob->ji_grpcache is null. check_pwd likely failed.");
      log_err(-1, __func__, log_buffer);
      undo_set_euid_egid(which,real_uid,real_gid,num_gids,real_gids,__func__);
      free(real_gids);
      return(-1);
      }
    
    if (setegid(pjob->ji_qs.ji_un.ji_momt.ji_exgid) != 0)
      {
      snprintf(log_buffer,sizeof(log_buffer),
        "setegid(%lu) for UID = %lu failed: %s\n",
        (unsigned long)pjob->ji_qs.ji_un.ji_momt.ji_exgid,
        (unsigned long)pjob->ji_qs.ji_un.ji_momt.ji_exuid,
        strerror(errno));
      
      log_err(errno, __func__, log_buffer);
      
      undo_set_euid_egid(which,real_uid,real_gid,num_gids,real_gids,__func__);
      free(real_gids);
      
      return(-1);
      }
    
    if (setuid_ext(pjob->ji_qs.ji_un.ji_momt.ji_exuid, TRUE) != 0)
      {
      snprintf(log_buffer,sizeof(log_buffer),
        "seteuid(%lu) failed: %s\n",
        (unsigned long)pjob->ji_qs.ji_un.ji_momt.ji_exuid,
        strerror(errno));
      
      log_err(errno, __func__, log_buffer);
      
      undo_set_euid_egid(which,real_uid,real_gid,num_gids,real_gids,__func__);
      free(real_gids);

      return(-1);
      }
    }

  rc = check_if_pelog_exists(which, pelog, sizeof(pelog), sbuf, specpelog, *pjob, jobtypespecified);

  switch (rc)
    {
      
    case PBSE_NONE:

      // continue
      break;

    case PELOG_DOESNT_EXIST:

      // not an error but we are done
      rc = PBSE_NONE;
      
      // fall through

    default:

      // error, notify caller 
      undo_set_euid_egid(which,real_uid,real_gid,num_gids,real_gids,__func__);
      free(real_gids);
      return(rc);
    }

  if (LOGLEVEL >= 5)
    {
    sprintf(log_buffer,"running %s script '%s' for job %s",
      PPEType[which],
      (pelog[0] != '\0') ? pelog : "NULL",
      pjob->ji_qs.ji_jobid);

    log_ext(-1, __func__, log_buffer, LOG_DEBUG);
    }

  /* script must be owned by root, be a regular file, be readable and
   * executable by the user, and not writable by group or other */

  if ((rc = check_pelog_permissions(sbuf, reduceprologchecks, pjob, pelog, which)) != PBSE_NONE)
    {
    undo_set_euid_egid(which,real_uid,real_gid,num_gids,real_gids,__func__);
    free(real_gids);
    return(rc);
    }

  fd_input = pe_input(pjob->ji_qs.ji_jobid);

  if (fd_input < 0)
    {
    undo_set_euid_egid(which,real_uid,real_gid,num_gids,real_gids,__func__);
    free(real_gids);
    return(pelog_err(pjob, pelog, -2,  "no pro/epilogue input file"));
    }

  run_exit = 0;

  // Set up communications between parent and child so that
  // child can send back the session id.
  if (pipe(pipes) == -1)
    {
    free(real_gids);
    return(-1);
    }

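  // keep the child's pipe ends at descriptors >= 3 so the stdin/stdout/stderr
  // redirection later in the child cannot clobber them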
  if (pipes[1] < 3)
    {
    kid_write = fcntl(pipes[1], F_DUPFD, 3);

    close(pipes[1]);
    }
  else
    {
    kid_write = pipes[1];
    }

  parent_read = pipes[0];

  if (pipe(pipes) == -1)
    {
    free(real_gids);
    return(-1);
    }

  if (pipes[0] < 3)
    {
    kid_read = fcntl(pipes[0], F_DUPFD, 3);

    close(pipes[0]);
    }
  else
    {
    kid_read = pipes[0];
    }

  parent_write = pipes[1];

  if ((kid_read < 0) ||
      (kid_write < 0))
    {
    free(real_gids);
    return(-1);
    }

  child = fork();

  if (child > 0)
    {
    /* parent - watch for prolog/epilog to complete */

    close(fd_input);

    close(kid_read);
    close(kid_write);
    read(parent_read,(char *)&childSessionID,sizeof(childSessionID));
    close(parent_read);
    close(parent_write);


    /* switch back to root if necessary */
    undo_set_euid_egid(which,real_uid,real_gid,num_gids,real_gids,__func__);
    free(real_gids);

    rc = get_child_exit_status(pjob, pelog, which);

    return(rc);
    }
  else if (child == 0)
    {
    /* child - run script */
    prepare_and_run_pelog_as_child(pjob, pe_io_type, delete_job, specpelog, pelog, which,
        parent_read, parent_write, kid_read, kid_write, fd_input);
    
    // NOTREACHED: the above function doesn't return
    exit(255);
    }  /* END else () */
  else
    {
    // ERROR
    log_err(errno, __func__, "Fork failed");
    rc = -1;
    }
    
  free(real_gids);

  return(rc);
  }  /* END run_pelog() */
Example #6
void prepare_and_run_pelog_as_child(

  job  *pjob,
  int   pe_io_type,
  int   delete_job,
  char *specpelog,
  char *pelog,
  int   which,
  int   parent_read,
  int   parent_write,
  int   kid_read,
  int   kid_write,
  int   fd_input)

  {
  char *arg[12];

  handle_pipes_as_child(parent_read, parent_write, kid_read, kid_write);

  close_handles_as_child();

  // setup stdin for the pelog
  if (fd_input != 0)
    {
    close(0);

    if (dup(fd_input) == -1) {}

    close(fd_input);
    }

  setup_pelog_outputs(pjob, pe_io_type, delete_job, specpelog);

  change_directory_as_needed(which, pjob);

  /* for both prolog and epilog */
  if (DEBUGMODE == 1)
    {
    char resc_list[2048];
    fprintf(stderr, "PELOGINFO:  script:'%s'  jobid:'%s'  euser:'******'  egroup:'%s'  jobname:'%s' SSID:'%ld'  RESC:'%s'\n",
            pelog,
            pjob->ji_qs.ji_jobid,
            pjob->ji_wattr[JOB_ATR_euser].at_val.at_str,
            pjob->ji_wattr[JOB_ATR_egroup].at_val.at_str,
            pjob->ji_wattr[JOB_ATR_jobname].at_val.at_str,
            pjob->ji_wattr[JOB_ATR_session_id].at_val.at_long,
            resc_to_string(pjob, JOB_ATR_resource, resc_list, sizeof(resc_list)));
    }

  setup_pelog_arguments(pelog, pjob, which, arg);

  setup_pelog_environment(pjob,  which);
  
  /*
   * if we want to run as user then we need to reset real user permissions
   * since it seems that some OSs use real not effective user id when execv'ing
   */
  
  if ((which == PE_PROLOGUSER) || 
      (which == PE_EPILOGUSER) || 
      (which == PE_PROLOGUSERJOB) || 
      (which == PE_EPILOGUSERJOB))
    {
    setuid_ext(pbsuser, TRUE);
    setegid(pbsgroup);

    if (become_the_user(pjob) != PBSE_NONE)
      {
      exit(-1);
      }
    }

  execv(pelog,arg);

  /* not reached unless execv() fails; report the error and exit */
  sprintf(log_buffer,"execv of %s failed: %s\n",
    pelog,
    strerror(errno));

  if (write_ac_socket(2, log_buffer, strlen(log_buffer)) == -1)
    {
    /* cannot write message to stderr */

    /* NO-OP */
    }

  fsync(2);

  exit(255);
  } /* END prepare_and_run_pelog_as_child() */
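The helper bodies are not part of these excerpts, but the read(parent_read, &childSessionID, ...) in Example #5 implies that the child reports its session id back through the pipe. A hypothetical sketch of that handoff; report_session_id() and its exact placement are assumptions, only the POSIX calls are given:

#include <sys/types.h>
#include <unistd.h>

/* hypothetical (not from the sources): detach into a new session and
 * send the session id to the parent over the pipe's write end */
static void report_session_id(int kid_write)
  {
  pid_t sid = setsid();

  if (write(kid_write, &sid, sizeof(sid)) != (ssize_t)sizeof(sid))
    {
    /* the parent will simply see a short read; nothing useful to do here */
    }
  }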