Example no. 1
0
void req_stat_que(

  struct batch_request *preq) /* ptr to the decoded request   */

  {
  char     *name;
  pbs_queue    *pque = NULL;

  struct batch_reply *preply;
  int      rc   = 0;
  int      type = 0;

  /*
   * first, validate the name of the requested object, either
   * a queue, or null for all queues
   */

  name = preq->rq_ind.rq_status.rq_id;

  if ((*name == '\0') || (*name == '@'))
    {
    type = 1;
    }
  else
    {
    pque = find_queuebyname(name);

    if (pque == NULL)
      {
      req_reject(PBSE_UNKQUE, 0, preq, NULL, "cannot locate queue");

      return;
      }
    }

  preply = &preq->rq_reply;

  preply->brp_choice = BATCH_REPLY_CHOICE_Status;

  CLEAR_HEAD(preply->brp_un.brp_status);

  if (type == 0)
    {
    /* get status of the named queue */

    rc = status_que(pque, preq, &preply->brp_un.brp_status);
    }
  else
    {
    /* get status of all queues */

    pque = (pbs_queue *)GET_NEXT(svr_queues);

    while (pque != NULL)
      {
      rc = status_que(pque, preq, &preply->brp_un.brp_status);

      if (rc != 0)
        {
        if (rc != PBSE_PERM)
          break;

        rc = 0;
        }

      pque = (pbs_queue *)GET_NEXT(pque->qu_link);
      }
    }

  if (rc != 0)
    {
    reply_free(preply);

    req_reject(rc, bad, preq, NULL, "status_queue failed");
    }
  else
    {
    reply_send(preq);
    }

  return;
  }  /* END req_stat_que() */
Example no. 2
0
//	Return true if objp will collide with some large object.
//	Don't check for an object this ship is docked to.
int collide_predict_large_ship(object *objp, float distance)
{
	object	*objp2;
	vec3d	cur_pos, goal_pos;
	ship_info	*sip;

	sip = &Ship_info[Ships[objp->instance].ship_info_index];

	cur_pos = objp->pos;

	vm_vec_scale_add(&goal_pos, &cur_pos, &objp->orient.vec.fvec, distance);

	for ( objp2 = GET_FIRST(&obj_used_list); objp2 != END_OF_LIST(&obj_used_list); objp2 = GET_NEXT(objp2) ) {
		if ((objp != objp2) && (objp2->type == OBJ_SHIP)) {
			if (Ship_info[Ships[objp2->instance].ship_info_index].flags & (SIF_BIG_SHIP | SIF_HUGE_SHIP)) {
				if (dock_check_find_docked_object(objp, objp2))
					continue;

				if (cpls_aux(&goal_pos, objp2, objp))
					return 1;
			}
		} else if (!(sip->flags & (SIF_BIG_SHIP | SIF_HUGE_SHIP)) && (objp2->type == OBJ_ASTEROID)) {
			if (vm_vec_dist_quick(&objp2->pos, &objp->pos) < (distance + objp2->radius)*2.5f) {
				vec3d	pos, delvec;
				int		count;
				float		d1;

				d1 = 2.5f * distance + objp2->radius;
				count = (int) (d1/(objp2->radius + objp->radius));	//	Scale up distance, else looks like there would be a collision.
				pos = cur_pos;
				vm_vec_normalized_dir(&delvec, &goal_pos, &cur_pos);
				vm_vec_scale(&delvec, d1/count);

				for (; count>0; count--) {
					if (vm_vec_dist_quick(&pos, &objp2->pos) < objp->radius + objp2->radius)
						return 1;
					vm_vec_add2(&pos, &delvec);
				}
			}
		}
	}

	return 0;
}
Example no. 3
0
/**
 * @brief
 *      Internal session cpu time decoding routine.
 *
 * @param[in] job - a job pointer.
 *
 * @return      ulong
 * @retval      sum of all cpu time consumed for all tasks executed by the job, in seconds,
 *              adjusted by cputfactor.
 *
 */
static unsigned long
cput_sum(job *pjob)
{
	ulong			cputime, addtime;
	int			i;
	int			nps = 0;
	int			taskprocs;
	prstatus_t		*ps;
	prpsinfo_t		*pi;
	task			*ptask;
	ulong			tcput;

	cputime = 0;
	for (ptask = (task *)GET_NEXT(pjob->ji_tasks);
		ptask != NULL;
		ptask = (task *)GET_NEXT(ptask->ti_jobtask)) {

		/* DEAD task */
		if (ptask->ti_qs.ti_sid <= 1) {
			cputime += ptask->ti_cput;
			continue;
		}

		tcput = 0;
		taskprocs = 0;
		for (i=0; i<nproc; i++) {
			pi = &proc_info[i];
			ps = &proc_status[i];

			/* is this process part of the task? */
			if (ptask->ti_qs.ti_sid != pi->pr_sid)
				continue;

			nps++;
			taskprocs++;
			if (pi->pr_state == SZOMB) {
				/* use zombie's time iff top process in job */
				if ((pi->pr_sid != pi->pr_pid) &&
					(pi->pr_ppid != mom_pid))
					continue;

				tcput += tv(pi->pr_time);
				DBPRT(("%s: task %08.8X ses %d pid %d "
					"(zombie) cputime %lu\n", __func__,
					pi->pr_sid, pi->pr_pid, tcput))
			} else {
				addtime = tv(ps->pr_utime) + tv(ps->pr_stime) +
					tv(ps->pr_cutime) + tv(ps->pr_cstime);

				tcput += addtime;
				DBPRT(("%s: task %08.8X ses %d pid %d "
					"cputime %lu\n", __func__,
					ptask->ti_qs.ti_task, pi->pr_sid,
					ps->pr_pid, tcput))
			}
		}
		if (tcput > ptask->ti_cput)
			ptask->ti_cput = tcput;
		cputime += ptask->ti_cput;
		DBPRT(("%s: task %08.8X cput %lu total %lu\n", __func__,
			ptask->ti_qs.ti_task, ptask->ti_cput, cputime))

		if (taskprocs == 0) {
			sprintf(log_buffer,
				"no active process for task %8.8X",
				ptask->ti_qs.ti_task);
			log_event(PBSEVENT_JOB, PBS_EVENTCLASS_JOB,
				LOG_INFO, pjob->ji_qs.ji_jobid,
				log_buffer);
			ptask->ti_qs.ti_exitstat = 0;
			ptask->ti_qs.ti_status = TI_STATE_EXITED;
			if (pjob->ji_qs.ji_un.ji_momt.ji_exitstat >= 0)
				pjob->ji_qs.ji_un.ji_momt.ji_exitstat = 0;
			task_save(ptask);
			exiting_tasks = 1;
		}
	}

	if (nps == 0)
		pjob->ji_flags |= MOM_NO_PROC;

	/* total cpu time scaled by cputfactor, per the function description above */
	return ((unsigned long)((double)cputime * cputfactor));
}
Example no. 4
0
/* array_recov reads in  an array struct saved to disk and inserts it into
   the servers list of arrays */
int array_recov(

  char       *path, 
  job_array **new_pa)

  {
  job_array *pa;
  array_request_node *rn;
  char  log_buf[LOCAL_LOG_BUF_SIZE];
  int   fd;
  int   old_version;
  int   num_tokens;
  int   i;
  int   len;
  int   rc;

  *new_pa = NULL;

  old_version = ARRAY_QS_STRUCT_VERSION;

  /* allocate the storage for the struct */
  pa = (job_array*)calloc(1,sizeof(job_array));

  if (pa == NULL)
    {
    return(PBSE_SYSTEM);
    }

  /* initialize the linked list nodes */

  CLEAR_HEAD(pa->request_tokens);

  fd = open(path, O_RDONLY, 0);
  if (fd < 0)
    {
    free(pa);
    return(PBSE_SYSTEM);
    }

  if (array_259_upgrade)
    {
    rc = read_and_convert_259_array(fd, pa, path);
    if (rc != PBSE_NONE)
      {
      free(pa);
      close(fd);
      return(rc);
      }
    }
  else
    {

    /* read the file into the struct previously allocated.
     */

    len = read_ac_socket(fd, &(pa->ai_qs), sizeof(pa->ai_qs));
    if ((len < 0) || ((len < (int)sizeof(pa->ai_qs)) && (pa->ai_qs.struct_version == ARRAY_QS_STRUCT_VERSION)))
      {
      sprintf(log_buf, "error reading %s", path);
      log_err(errno, __func__, log_buf);
      free(pa);
      close(fd);
      return(PBSE_SYSTEM);
      }

    if (pa->ai_qs.struct_version != ARRAY_QS_STRUCT_VERSION)
      {
      rc = array_upgrade(pa, fd, pa->ai_qs.struct_version, &old_version);
      if (rc)
        {
        sprintf(log_buf, "Cannot upgrade array version %d to %d", pa->ai_qs.struct_version, ARRAY_QS_STRUCT_VERSION);
        log_err(errno, __func__, log_buf);
        free(pa);
        close(fd);
        return(rc);
        }
      }
    }

  pa->job_ids = (char **)calloc(pa->ai_qs.array_size, sizeof(char *));

  /* check to see if there is any additional info saved in the array file */
  /* check if there are any array request tokens that haven't been fully
     processed */

  if (old_version > 1)
    {
    if (read_ac_socket(fd, &num_tokens, sizeof(int)) != sizeof(int))
      {
      sprintf(log_buf, "error reading token count from %s", path);
      log_err(errno, __func__, log_buf);

      free(pa);
      close(fd);
      return(PBSE_SYSTEM);
      }

    for (i = 0; i < num_tokens; i++)
      {
      rn = (array_request_node *)calloc(1, sizeof(array_request_node));

      if (read_ac_socket(fd, rn, sizeof(array_request_node)) != sizeof(array_request_node))
        {
        sprintf(log_buf, "error reading array_request_node from %s", path);
        log_err(errno, __func__, log_buf);

        free(rn);

        for (rn = (array_request_node*)GET_NEXT(pa->request_tokens);
            rn != NULL;
            rn = (array_request_node*)GET_NEXT(pa->request_tokens))
          {
          delete_link(&rn->request_tokens_link);
          free(rn);
          }

        free(pa);

        close(fd);
        return(PBSE_SYSTEM);
        }

      CLEAR_LINK(rn->request_tokens_link);

      append_link(&pa->request_tokens, &rn->request_tokens_link, (void*)rn);

      }

    }

  close(fd);

  CLEAR_HEAD(pa->ai_qs.deps);

  if (old_version != ARRAY_QS_STRUCT_VERSION)
    {
    /* resave the array struct if the version on disk is older than the current */
    array_save(pa);
    }

  pa->ai_mutex = (pthread_mutex_t *)calloc(1, sizeof(pthread_mutex_t));
  pthread_mutex_init(pa->ai_mutex,NULL);

  lock_ai_mutex(pa, __func__, NULL, LOGLEVEL);

  /* link the struct into the servers list of job arrays */
  insert_array(pa);

  *new_pa = pa;

  return(PBSE_NONE);
  } /* END array_recov() */
Example no. 5
0
int setup_array_struct(
    
  job *pjob)

  {
  job_array          *pa;
  array_request_node *rn;

  int                 bad_token_count;
  int                 array_size;
  int                 rc;
  char                log_buf[LOCAL_LOG_BUF_SIZE];
  long                max_array_size;

  if (pjob == NULL)
    return RM_ERR_BADPARAM;

  pa = (job_array *)calloc(1, sizeof(job_array));

  /* guard against a failed allocation before dereferencing pa */
  if (pa == NULL)
    return(PBSE_MEM_MALLOC);

  pa->ai_qs.struct_version = ARRAY_QS_STRUCT_VERSION;
  
  strcpy(pa->ai_qs.parent_id, pjob->ji_qs.ji_jobid);
  strcpy(pa->ai_qs.fileprefix, pjob->ji_qs.ji_fileprefix);
  snprintf(pa->ai_qs.owner, sizeof(pa->ai_qs.owner), "%s", pjob->ji_wattr[JOB_ATR_job_owner].at_val.at_str);
  snprintf(pa->ai_qs.submit_host, sizeof(pa->ai_qs.submit_host), "%s", get_variable(pjob, pbs_o_host));

  pa->ai_qs.num_cloned = 0;
  CLEAR_HEAD(pa->request_tokens);

  pa->ai_mutex = (pthread_mutex_t *)calloc(1, sizeof(pthread_mutex_t));
  pthread_mutex_init(pa->ai_mutex, NULL);
  lock_ai_mutex(pa, __func__, NULL, LOGLEVEL);

  if (job_save(pjob, SAVEJOB_FULL, 0) != 0)
    {
    /* the array is deleted in svr_job_purge */
    unlock_ai_mutex(pa, __func__, "1", LOGLEVEL);
    /* Does job array need to be removed? */

    if (LOGLEVEL >= 6)
      {
      log_record(
        PBSEVENT_JOB,
        PBS_EVENTCLASS_JOB,
        pjob->ji_qs.ji_jobid,
        (char *)"cannot save job");
      }

    svr_job_purge(pjob);

    return(1);
    }

  if ((rc = set_slot_limit(pjob->ji_wattr[JOB_ATR_job_array_request].at_val.at_str, pa)))
    {
    long max_limit = 0;
    get_svr_attr_l(SRV_ATR_MaxSlotLimit, &max_limit);
    array_delete(pa);

    snprintf(log_buf,sizeof(log_buf),
      "Array %s requested a slot limit above the max limit %ld, rejecting\n",
      pa->ai_qs.parent_id,
      max_limit);

    log_event(PBSEVENT_SYSTEM,PBS_EVENTCLASS_JOB,pa->ai_qs.parent_id,log_buf);

    return(INVALID_SLOT_LIMIT);
    }

  pa->ai_qs.jobs_running = 0;
  pa->ai_qs.num_started = 0;
  pa->ai_qs.num_failed = 0;
  pa->ai_qs.num_successful = 0;
  
  bad_token_count = parse_array_request(
                      pjob->ji_wattr[JOB_ATR_job_array_request].at_val.at_str,
                      &(pa->request_tokens));

  /* get the number of elements that should be allocated in the array */
  rn = (array_request_node *)GET_NEXT(pa->request_tokens);
  array_size = 0;
  pa->ai_qs.num_jobs = 0;
  while (rn != NULL) 
    {
    if (rn->end > array_size)
      array_size = rn->end;
    /* calculate the actual number of jobs (different from array size) */
    pa->ai_qs.num_jobs += rn->end - rn->start + 1;

    rn = (array_request_node *)GET_NEXT(rn->request_tokens_link);
    }

  /* size of array is the biggest index + 1 */
  array_size++; 

  if (get_svr_attr_l(SRV_ATR_MaxArraySize, &max_array_size) == PBSE_NONE)
    {
    if (max_array_size < pa->ai_qs.num_jobs)
      {
      array_delete(pa);

      return(ARRAY_TOO_LARGE);
      }
    }

  /* initialize the array */
  pa->job_ids = (char **)calloc(array_size, sizeof(char *));
  if (pa->job_ids == NULL)
    {
    sprintf(log_buf, "Failed to alloc job_ids: job %s", pjob->ji_qs.ji_jobid);
    log_event(PBSEVENT_JOB, PBS_EVENTCLASS_JOB, __func__, log_buf);
    return(PBSE_MEM_MALLOC);
    }

  /* remember array_size */
  pa->ai_qs.array_size = array_size;

  CLEAR_HEAD(pa->ai_qs.deps);

  array_save(pa);

  if (bad_token_count > 0)
    {
    array_delete(pa);
    return 2;
    }

  strcpy(pjob->ji_arraystructid, pa->ai_qs.parent_id);

  insert_array(pa);

  unlock_ai_mutex(pa, __func__, "1", LOGLEVEL);

  return(PBSE_NONE);
  } /* END setup_array_struct() */
Example no. 6
0
/**
 * @brief
 * 	main - the initialization and main loop of pbs_daemon
 */
int
main(int argc, char *argv[])
{
	char		jobfile[MAXPATHLEN+1];
	char		jobfile_full[MAXPATHLEN+1];
	pbs_net_t	hostaddr = 0;
	int			port = -1;
	int			move_type = -1;
	pbs_list_head	attrl;
	enum conn_type  cntype = ToServerDIS;
	int		con = -1;
	char		*destin;
	int			encode_type;
	int			i;
	job			*jobp;
	char		 job_id[PBS_MAXSVRJOBID+1];
	attribute	*pattr;
	struct attropl  *pqjatr;      /* list (single) of attropl for quejob */
	char		 script_name[MAXPATHLEN+1];
	int			in_server = -1;
	char		*param_name, *param_val;
	char		buf[4096];
	struct  hostent *hp;
	struct in_addr  addr;
	char            *credbuf = NULL;
	size_t		credlen = 0;
	int 		prot = PROT_TCP;
	/* run for real, or just output the version and exit? */

	execution_mode(argc, argv);

	/* If we are not run with real and effective uid of 0, forget it */

	pbs_loadconf(0);

	if (!isAdminPrivilege(getlogin())) {
		fprintf(stderr, "%s: Must be run by root\n", argv[0]);
		exit(SEND_JOB_FATAL);
	}

	/* initialize the pointers in the resource_def array */
	for (i = 0; i < (svr_resc_size - 1); ++i)
		svr_resc_def[i].rs_next = &svr_resc_def[i+1];
	/* last entry is left with null pointer */
	/* set single threaded mode */
	pbs_client_thread_set_single_threaded_mode();
	/* disable attribute verification */
	set_no_attribute_verification();

	/* initialize the thread context */
	if (pbs_client_thread_init_thread_context() != 0) {
		fprintf(stderr, "%s: Unable to initialize thread context\n",
			argv[0]);
		exit(SEND_JOB_FATAL);
	}

	if(set_msgdaemonname("PBS_send_job")) {
		fprintf(stderr, "Out of memory\n");
		return 1;
	}

	winsock_init();

	connection_init();

	while (fgets(buf, sizeof(buf), stdin) != NULL) {
		buf[strlen(buf)-1] = '\0';	/* gets rid of newline */

		param_name = buf;
		param_val = strchr(buf, '=');
		if (param_val) {
			*param_val = '\0';
			param_val++;
		} else {	/* bad param_val -- stop reading input */
			break;
		}

		if (strcmp(param_name, "jobfile") == 0) {
			jobfile[0] = '\0';
			strncpy(jobfile, param_val, MAXPATHLEN);
		} else if (strcmp(param_name, "destaddr") == 0) {
			hostaddr = atol(param_val);
		} else if (strcmp(param_name, "destport") == 0) {
			port = atoi(param_val);
		} else if (strcmp(param_name, "move_type") == 0) {
			move_type = atoi(param_val);
		} else if (strcmp(param_name, "in_server") == 0) {
			in_server = atoi(param_val);
		} else if (strcmp(param_name, "server_name") == 0) {
			server_name[0] = '\0';
			strncpy(server_name, param_val, PBS_MAXSERVERNAME);
		} else if (strcmp(param_name, "server_host") == 0) {
			server_host[0] = '\0';
			strncpy(server_host, param_val, (sizeof(server_host) - 1));
		} else if (strcmp(param_name, "server_addr") == 0) {
			pbs_server_addr = atol(param_val);
		} else if (strcmp(param_name, "server_port") == 0) {
			pbs_server_port_dis = atoi(param_val);
		} else if (strcmp(param_name, "log_file") == 0) {
			log_file = strdup(param_val);
		} else if (strcmp(param_name, "path_log") == 0) {
			path_log[0] = '\0';
			strncpy(path_log, param_val, MAXPATHLEN);
		} else if (strcmp(param_name, "path_jobs") == 0) {
			path_jobs = strdup(param_val);
		} else if (strcmp(param_name, "path_spool") == 0) {
			path_spool = strdup(param_val);
		} else if (strcmp(param_name, "path_rescdef") == 0) {
			path_rescdef = strdup(param_val);
		} else if (strcmp(param_name, "path_users") == 0) {
			path_users = strdup(param_val);
		} else if (strcmp(param_name, "path_hooks_workdir") == 0) {
			path_hooks_workdir = strdup(param_val);
			if (path_hooks_workdir == NULL)
				exit(SEND_JOB_FATAL);
		} else if (strcmp(param_name, "svr_history_enable") == 0) {
			svr_history_enable = atol(param_val);
		} else if (strcmp(param_name, "svr_history_duration") == 0) {
			svr_history_duration = atol(param_val);
		} else if (strcmp(param_name, "single_signon_password_enable") == 0) {
			if (decode_b(&server.sv_attr[(int)SRV_ATR_ssignon_enable],
				NULL, NULL, param_val) != 0) {
				fprintf(stderr, "%s: failed to set ssignon_password_enable\n", argv[0]);
				exit(SEND_JOB_FATAL);
			}
		} else if (strcmp(param_name, "script_name") == 0) {
			strncpy(script_name, param_val, MAXPATHLEN + 1);
		} else
			break;
	}

	time(&time_now);

	(void)log_open_main(log_file, path_log, 1); /* silent open */

	if (setup_resc(1) == -1) {
		/* log_buffer set in setup_resc */
		log_err(-1, "pbsd_send_job(setup_resc)", log_buffer);
		return (-1);
	}

	if( strlen(jobfile) == 0 || hostaddr == 0 || port == 0 ||  move_type == -1 || \
			in_server == -1 || strlen(server_name) == 0 || strlen(server_host) == 0 || \
			pbs_server_addr == 0 || pbs_server_port_dis == 0 || \
			strlen(path_log) == 0 || path_jobs == NULL || \
			path_spool == NULL || path_users == NULL ) {
		log_err(-1, "pbs_send_job", "error on one of the parameters");
		log_close(0);	/* silent close */
		exit(SEND_JOB_FATAL);
	}

	CLEAR_HEAD(task_list_immed);
	CLEAR_HEAD(task_list_timed);
	CLEAR_HEAD(task_list_event);
	CLEAR_HEAD(svr_queues);
	CLEAR_HEAD(svr_alljobs);
	CLEAR_HEAD(svr_newjobs);
	CLEAR_HEAD(svr_allresvs);
	CLEAR_HEAD(svr_newresvs);
	CLEAR_HEAD(svr_deferred_req);
	CLEAR_HEAD(svr_unlicensedjobs);

	strcpy(jobfile_full, path_jobs);
	strcat(jobfile_full, jobfile);

	if (chk_save_file(jobfile_full) != 0) {
		sprintf(log_buffer, "Error opening jobfile=%s", jobfile);
		log_err(-1, __func__, log_buffer);
		goto fatal_exit;
	}

	if ((jobp=job_recov_fs(jobfile)) == NULL) {
		sprintf(log_buffer, "Failed to recreate job in jobfile=%s", jobfile);
		log_err(-1, __func__, log_buffer);
		goto fatal_exit;
	}

	/* now delete the temp job file that was created by job_save_fs in server code
	 * jobs are in database now, no need to keep in filesystem
	 */
	unlink(jobfile_full);

	if (in_server)
		append_link(&svr_alljobs, &jobp->ji_alljobs, jobp);


	/* select attributes/resources to send based on move type */

	if (move_type == MOVE_TYPE_Exec) {
		resc_access_perm = ATR_DFLAG_MOM;
		encode_type = ATR_ENCODE_MOM;
		cntype = ToServerDIS;
	} else {
		resc_access_perm = ATR_DFLAG_USWR | ATR_DFLAG_OPWR |
			ATR_DFLAG_MGWR | ATR_DFLAG_SvRD;
		encode_type = ATR_ENCODE_SVR;
		svr_dequejob(jobp);
	}

	CLEAR_HEAD(attrl);
	pattr = jobp->ji_wattr;
	for (i=0; i < (int)JOB_ATR_LAST; i++) {
		if ((job_attr_def+i)->at_flags & resc_access_perm) {
			(void)(job_attr_def+i)->at_encode(pattr+i, &attrl,
				(job_attr_def+i)->at_name, NULL,
				encode_type, NULL);
		}
	}
	attrl_fixlink(&attrl);

	/* script name is passed from parent */

	/* get host name */
	pbs_loadconf(0);
	addr.s_addr = htonl(hostaddr);
	hp = gethostbyaddr((void *)&addr, sizeof(struct in_addr), AF_INET);
	if (hp == NULL) {
		sprintf(log_buffer, "%s: h_errno=%d",
			inet_ntoa(addr), h_errno);
		log_err(-1, __func__, log_buffer);
	}
	else {
		/* read any credential file */
		(void)get_credential(hp->h_name, jobp,  PBS_GC_BATREQ,
			&credbuf, &credlen);
	}
	/* save the job id for use after we purge the job */

	(void)strcpy(job_id, jobp->ji_qs.ji_jobid);

	con = -1;

	DIS_tcparray_init();

	for (i=0; i<RETRY; i++) {

		pbs_errno = 0;
		/* connect to receiving server with retries */

		if (i > 0) {	/* recycle after an error */
			if (con >= 0)
				svr_disconnect(con);
			if (should_retry_route(pbs_errno) == -1) {
				goto fatal_exit;	/* fatal error, don't retry */
			}
			sleep(1<<i);
		}
		if ((con = svr_connect(hostaddr, port, 0, cntype, prot)) ==
			PBS_NET_RC_FATAL) {
			(void)sprintf(log_buffer, "send_job failed to %lx port %d",
				hostaddr, port);
			log_err(pbs_errno, __func__, log_buffer);
			goto fatal_exit;
		} else if (con == PBS_NET_RC_RETRY) {
			pbs_errno = WSAECONNREFUSED;	/* should retry */
			continue;
		}
		/*
		 * if the job is substate JOB_SUBSTATE_TRNOUTCM which means
		 * we are recovering after being down or a late failure, we
		 * just want to send the "ready-to-commit/commit"
		 */


		if (jobp->ji_qs.ji_substate != JOB_SUBSTATE_TRNOUTCM) {

			if (jobp->ji_qs.ji_substate != JOB_SUBSTATE_TRNOUT) {
				jobp->ji_qs.ji_substate = JOB_SUBSTATE_TRNOUT;
			}

			pqjatr = &((svrattrl *)GET_NEXT(attrl))->al_atopl;
			destin = jobp->ji_qs.ji_destin;

			if (PBSD_queuejob(con, jobp->ji_qs.ji_jobid, destin, pqjatr, NULL, prot, NULL)== 0) {
				if (pbs_errno == PBSE_JOBEXIST && move_type == MOVE_TYPE_Exec) {
					/* already running, mark it so */
					log_event(PBSEVENT_ERROR,
						PBS_EVENTCLASS_JOB, LOG_INFO,
						jobp->ji_qs.ji_jobid, "Mom reports job already running");
					goto ok_exit;

				} else if ((pbs_errno == PBSE_HOOKERROR) ||
					(pbs_errno == PBSE_HOOK_REJECT)  ||
					(pbs_errno == PBSE_HOOK_REJECT_RERUNJOB)  ||
					(pbs_errno == PBSE_HOOK_REJECT_DELETEJOB)) {
					char		name_buf[MAXPATHLEN+1];
					int		rfd;
					int		len;
					char		*reject_msg;
					int		err;

					err = pbs_errno;

					reject_msg = pbs_geterrmsg(con);
					(void)snprintf(log_buffer, sizeof(log_buffer),
						"send of job to %s failed error = %d reject_msg=%s",
						destin, err,
						reject_msg?reject_msg:"");
					log_event(PBSEVENT_JOB, PBS_EVENTCLASS_JOB,
						LOG_INFO, jobp->ji_qs.ji_jobid,
						log_buffer);

					(void)strcpy(name_buf, path_hooks_workdir);
					(void)strcat(name_buf, jobp->ji_qs.ji_jobid);
					(void)strcat(name_buf, HOOK_REJECT_SUFFIX);

					if ((reject_msg != NULL) &&
						(reject_msg[0] != '\0')) {

						if ((rfd = open(name_buf,
							O_RDWR|O_CREAT|O_TRUNC, 0600)) == -1) {
							snprintf(log_buffer,
								sizeof(log_buffer),
								"open of reject file %s failed: errno %d",
								name_buf, errno);
							log_event(PBSEVENT_JOB, PBS_EVENTCLASS_JOB, LOG_INFO, jobp->ji_qs.ji_jobid, log_buffer);
						} else {
							secure_file(name_buf, "Administrators",
								READS_MASK|WRITES_MASK|STANDARD_RIGHTS_REQUIRED);
							setmode(rfd, O_BINARY);
							len = strlen(reject_msg)+1;
							/* write also trailing null char */
							if (write(rfd, reject_msg, len) != len) {
								snprintf(log_buffer,
									sizeof(log_buffer),
									"write to file %s incomplete: errno %d", name_buf, errno);
								log_event(PBSEVENT_JOB, PBS_EVENTCLASS_JOB, LOG_INFO, jobp->ji_qs.ji_jobid, log_buffer);
							}
							close(rfd);
						}
					}

					if (err == PBSE_HOOKERROR)
						exit(SEND_JOB_HOOKERR);
					if (err == PBSE_HOOK_REJECT)
						exit(SEND_JOB_HOOK_REJECT);
					if (err == PBSE_HOOK_REJECT_RERUNJOB)
						exit(SEND_JOB_HOOK_REJECT_RERUNJOB);
					if (err == PBSE_HOOK_REJECT_DELETEJOB)
						exit(SEND_JOB_HOOK_REJECT_DELETEJOB);
				} else {
					(void)sprintf(log_buffer, "send of job to %s failed error = %d", destin, pbs_errno);
					log_event(PBSEVENT_JOB, PBS_EVENTCLASS_JOB, LOG_INFO, jobp->ji_qs.ji_jobid, log_buffer);
					continue;
				}
			}

			if (jobp->ji_qs.ji_svrflags & JOB_SVFLG_SCRIPT) {
				if (PBSD_jscript(con, script_name, prot, NULL) != 0)
					continue;
			}

			if (credlen > 0) {
				int     ret;

				ret = PBSD_jcred(con,
					jobp->ji_extended.ji_ext.ji_credtype,
					credbuf, credlen, prot, NULL);
				if ((ret == 0) || (i == (RETRY - 1)))
					free(credbuf);	/* free credbuf if credbuf is sent successfully OR */
				/* at the end of all retry attempts */
				if (ret != 0)
					continue;
			}

			if ((move_type == MOVE_TYPE_Exec) &&
				(jobp->ji_qs.ji_svrflags & JOB_SVFLG_HASRUN) &&
				(hostaddr !=  pbs_server_addr)) {
				/* send files created on prior run */
				if ((move_job_file(con, jobp, StdOut, prot) != 0) ||
					(move_job_file(con, jobp, StdErr, prot) != 0) ||
					(move_job_file(con, jobp, Chkpt, prot) != 0))
					continue;
			}

			jobp->ji_qs.ji_substate = JOB_SUBSTATE_TRNOUTCM;
		}

		if (PBSD_rdytocmt(con, job_id, prot, NULL) != 0)
			continue;

		if (PBSD_commit(con, job_id, prot, NULL) != 0)
			goto fatal_exit;
		goto ok_exit;	/* This child process is all done */
	}
	if (con >= 0)
		svr_disconnect(con);
	/*
	 * If connection is actively refused by the execution node(or mother superior) OR
	 * the execution node(or mother superior) is rejecting request with error
	 * PBSE_BADHOST(failing to authorize server host), the node should be marked down.
	 */
	if ((move_type == MOVE_TYPE_Exec) && (pbs_errno == WSAECONNREFUSED || pbs_errno == PBSE_BADHOST)) {
		i = SEND_JOB_NODEDW;
	} else if (should_retry_route(pbs_errno) == -1) {
		i = SEND_JOB_FATAL;
	} else {
		i = SEND_JOB_RETRY;
	}
	(void)sprintf(log_buffer, "send_job failed with error %d", pbs_errno);
	log_event(PBSEVENT_DEBUG, PBS_EVENTCLASS_JOB, LOG_NOTICE,
		jobp->ji_qs.ji_jobid, log_buffer);
	log_close(0);
	net_close(-1);
	unlink(script_name);
	exit(i);

fatal_exit:
	if (con >= 0)
		svr_disconnect(con);
	log_close(0);
	net_close(-1);
	unlink(script_name);
	exit(SEND_JOB_FATAL);

ok_exit:
	if (con >= 0)
		svr_disconnect(con);
	log_close(0);
	net_close(-1);
	unlink(script_name);
	exit(SEND_JOB_OK);
}
Example no. 7
0
int release_array_range(

  job_array            *pa,
  struct batch_request *preq,
  char                 *range_str)

  {
  tlist_head tl;
  int i;
  int rc;
  job                *pjob;

  array_request_node *rn;
  array_request_node *to_free;
  
  char *range = strchr(range_str,'=');
  if (range == NULL)
    return(PBSE_IVALREQ);

  range++; /* move past the '=' */
  
  CLEAR_HEAD(tl);
  
  if (parse_array_request(range,&tl) > 0)
    {
    /* don't release the jobs if there is a range error */
    
    return(PBSE_IVALREQ);
    }
  
  /* release just that range from the array */
  rn = (array_request_node*)GET_NEXT(tl);
  
  while (rn != NULL)
    {
    for (i = rn->start; i <= rn->end; i++)
      {
      /* don't stomp on other memory */
      if (i >= pa->ai_qs.array_size)
        continue;

      if (pa->job_ids[i] == NULL)
        continue;

      if ((pjob = svr_find_job(pa->job_ids[i], FALSE)) == NULL)
        {
        free(pa->job_ids[i]);
        pa->job_ids[i] = NULL;
        }
      else
        {
        if ((rc = release_job(preq,pjob)))
          {
          unlock_ji_mutex(pjob, __func__, "1", LOGLEVEL);
          return(rc);
          }
        unlock_ji_mutex(pjob, __func__, "2", LOGLEVEL);
        }
      }
    
    /* release mem */
    to_free = rn;
    rn = (array_request_node*)GET_NEXT(rn->request_tokens_link);
    free(to_free);
    }

  return(PBSE_NONE);
  } /* END release_array_range() */
Example no. 8
0
static void stat_update(

  struct work_task *pwt)

  {

  struct stat_cntl     *cntl;
  job                  *pjob;

  struct batch_request *preq;

  struct batch_reply   *preply;

  struct brp_status    *pstatus;
  svrattrl        *sattrl;
  int    oldsid;

  preq = pwt->wt_parm1;
  preply = &preq->rq_reply;
  cntl = preq->rq_extra;

  if (preply->brp_choice == BATCH_REPLY_CHOICE_Status)
    {
    pstatus = (struct brp_status *)GET_NEXT(preply->brp_un.brp_status);

    while (pstatus != NULL)
      {
      if ((pjob = find_job(pstatus->brp_objname)))
        {
        sattrl = (svrattrl *)GET_NEXT(pstatus->brp_attr);

        oldsid = pjob->ji_wattr[(int)JOB_ATR_session_id].at_val.at_long;

        modify_job_attr(
          pjob,
          sattrl,
          ATR_DFLAG_MGWR | ATR_DFLAG_SvWR,
          &bad);

        if (oldsid != pjob->ji_wattr[(int)JOB_ATR_session_id].at_val.at_long)
          {
          /* first save since running job (or the sid has changed), */
          /* must save session id    */

          job_save(pjob, SAVEJOB_FULL);

          svr_mailowner(pjob, MAIL_BEGIN, MAIL_NORMAL, NULL);
          }

#ifdef USESAVEDRESOURCES
        else
          {
          /* save so we can recover resources used */
          job_save(pjob, SAVEJOB_FULL);
          }
#endif    /* USESAVEDRESOURCES */


        pjob->ji_momstat = time_now;
        }

      pstatus = (struct brp_status *)GET_NEXT(pstatus->brp_stlink);
      }  /* END while (pstatus != NULL) */
    }    /* END if (preply->brp_choice == BATCH_REPLY_CHOICE_Status) */
  else
    {
    if (preply->brp_code == PBSE_UNKJOBID)
      {
      /* we sent a stat request, but mom says it doesn't know anything about
         the job */
      if ((pjob = find_job(preq->rq_ind.rq_status.rq_id)))
        {
        /* job really isn't running any more - mom doesn't know anything about it
           this can happen if a diskless node reboots and the mom_priv/jobs
           directory is cleared, set its state to queued so job_abt doesn't
           think it is still running */
        svr_setjobstate(pjob, JOB_STATE_QUEUED, JOB_SUBSTATE_ABORT);
        rel_resc(pjob);
        job_abt(&pjob, "Job does not exist on node");

        /* TODO, if the job is rerunnable we should set its state back to queued */

        }
      }
    }

  release_req(pwt);

  cntl->sc_conn = -1;

  if (cntl->sc_post)
    cntl->sc_post(cntl); /* continue where we left off */
  else
    free(cntl); /* a bit of a kludge but it saves an extra function */

  return;
  }  /* END stat_update() */
Example no. 9
0
inline static void insert_node( void *bp ) {

	RESET_PREALLOC_INFO(GET_NEXT(bp)); // reset the prev-alloc bit (second bit) in the header of the next block

    if (GET_SIZE(bp) == 8) {
    	PUT_LEFT(bp, list_for_8);
    	list_for_8 = bp;
    	/* note: this must be SET, not RESET (an earlier version mistakenly used RESET) */
    	SET_PRE_8_INFO(GET_NEXT(bp)); 
    	
    	return;
    } else if (GET_SIZE(bp) == 16){
    	if (list_for_16 == NULL_POINTER) {
    		PUT_RIGHT(bp, list_for_16);
    		PUT_LEFT(bp, NULL_POINTER);
    		list_for_16 = bp;
    		return;
    	}
    	PUT_RIGHT(bp, list_for_16);
    	PUT_LEFT(list_for_16, bp);
    	PUT_LEFT(bp, NULL_POINTER);
    	list_for_16 = bp;
    } else {					//use BST when block size > 16
    	if (root == NULL_POINTER) {
    		PUT_LEFT(bp, NULL_POINTER);
    		PUT_RIGHT(bp, NULL_POINTER);
    		PUT_PARENT(bp, NULL_POINTER);
    		PUT_BROTHER(bp, NULL_POINTER);
    		root = bp;   //root is also an address.
    		return;
    	} else {
    		void *parent = root;
    		void *temp = root;
    		int flag = 0; 
    		/*flag = 0 : the root itself; flag = 1 : left; flag = 2 : right*/
    		while (temp != NULL_POINTER) {
    			/*when finding the free block in BST with the same size*/
    			if (GET_SIZE(temp) == GET_SIZE(bp)) {
    				PUT_LEFT(bp, GET_LEFT(temp));
    				PUT_RIGHT(bp, GET_RIGHT(temp));
    				PUT_BROTHER(bp, temp);
    				PUT_PARENT(GET_LEFT(temp), bp);
    				PUT_PARENT(GET_RIGHT(temp), bp);
    				PUT_LEFT(temp, bp);

    				PUT_PARENT(bp, GET_PARENT(temp));
    				if (flag == 0) {
    					root = bp;
    				} else if (flag == 1) {
    					PUT_LEFT(parent, bp);
    				} else {
    					PUT_RIGHT(parent, bp);
    				}
    				
    				return;
    				/*SEARCH RIGHT CHILD*/
    			} else if (GET_SIZE(temp) < GET_SIZE(bp)) {
    				parent = temp;
    				temp = (void *)GET_RIGHT(temp);
    				flag = 2;
    				/*SEARCH LEFT CHILD*/
    			} else {
    				parent = temp;
    				temp = (void *)GET_LEFT(temp);
    				flag = 1;
    			}
    		
    		}
    		/*insert new node*/
    		if (flag == 1) {
    			PUT_LEFT(parent, bp);
    		} else {
    			PUT_RIGHT(parent, bp);
    		}
    		
    		PUT_LEFT(bp, NULL_POINTER);
    		PUT_RIGHT(bp, NULL_POINTER);
			PUT_PARENT(bp, parent);
			PUT_BROTHER(bp, NULL_POINTER);   		
    	
    	}
    
    }
      //BST_checker(root);
}
Example no. 10
0
void req_stat_svr(

  struct batch_request *preq) /* ptr to the decoded request */

  {
  svrattrl    *pal;

  struct batch_reply *preply;

  struct brp_status  *pstat;
  int *nc;
  static char nc_buf[128];

  /* update count and state counts from sv_numjobs and sv_jobstates */

  server.sv_attr[(int)SRV_ATR_TotalJobs].at_val.at_long = server.sv_qs.sv_numjobs;
  server.sv_attr[(int)SRV_ATR_TotalJobs].at_flags |= ATR_VFLAG_SET;

  update_state_ct(
    &server.sv_attr[(int)SRV_ATR_JobsByState],
    server.sv_jobstates,
    server.sv_jobstbuf);

  nc = netcounter_get();
  sprintf(nc_buf, "%d %d %d", *nc, *(nc + 1), *(nc + 2));
  server.sv_attr[(int)SRV_ATR_NetCounter].at_val.at_str = nc_buf;
  server.sv_attr[(int)SRV_ATR_NetCounter].at_flags |= ATR_VFLAG_SET;

  /* allocate a reply structure and a status sub-structure */

  preply = &preq->rq_reply;
  preply->brp_choice = BATCH_REPLY_CHOICE_Status;

  CLEAR_HEAD(preply->brp_un.brp_status);

  pstat = (struct brp_status *)malloc(sizeof(struct brp_status));

  if (pstat == NULL)
    {
    reply_free(preply);

    req_reject(PBSE_SYSTEM, 0, preq, NULL, NULL);

    return;
    }

  CLEAR_LINK(pstat->brp_stlink);

  strcpy(pstat->brp_objname, server_name);

  pstat->brp_objtype = MGR_OBJ_SERVER;

  CLEAR_HEAD(pstat->brp_attr);

  append_link(&preply->brp_un.brp_status, &pstat->brp_stlink, pstat);

  /* add attributes to the status reply */

  bad = 0;

  pal = (svrattrl *)GET_NEXT(preq->rq_ind.rq_status.rq_attr);

  if (status_attrib(
        pal,
        svr_attr_def,
        server.sv_attr,
        SRV_ATR_LAST,
        preq->rq_perm,
        &pstat->brp_attr,
        &bad,
        1))    /* IsOwner == TRUE */
    {
    reply_badattr(PBSE_NOATTR, bad, pal, preq);
    }
  else
    {
    reply_send(preq);
    }

  return;
  }  /* END req_stat_svr() */
Example no. 11
0
static void req_stat_job_step2(

  struct stat_cntl *cntl)  /* I/O (freed on return) */

  {
  svrattrl        *pal;
  job         *pjob = NULL;

  struct batch_request *preq;

  struct batch_reply   *preply;
  int          rc = 0;

  enum TJobStatTypeEnum type;

  pbs_queue            *pque = NULL;
  int                   exec_only = 0;

  int                   IsTruncated = 0;

  long                  DTime;  /* delta time - only report full attribute list if J->MTime > DTime */

  static svrattrl      *dpal = NULL;
  
  int job_array_index = 0;
  job_array *pa = NULL;
  

  preq   = cntl->sc_origrq;
  type   = (enum TJobStatTypeEnum)cntl->sc_type;
  preply = &preq->rq_reply;

  /* See pbs_server_attributes(1B) for details on "poll_jobs" behaviour */

  /* NOTE:  If IsTruncated is true, should walk all queues and walk jobs in each queue
            until max_reported is reached (NYI) */

  if (dpal == NULL)
    {
    /* build 'delta' attribute list */

    svrattrl *tpal;

    tlist_head dalist;

    int aindex;

    int atrlist[] =
      {
      JOB_ATR_jobname,
      JOB_ATR_resc_used,
      JOB_ATR_LAST
      };

    CLEAR_LINK(dalist);

    for (aindex = 0;atrlist[aindex] != JOB_ATR_LAST;aindex++)
      {
      if ((tpal = attrlist_create("", "", 23)) == NULL)
        {
        return;
        }

      tpal->al_valln = atrlist[aindex];

      if (dpal == NULL)
        dpal = tpal;

      append_link(&dalist, &tpal->al_link, tpal);
      }
    }  /* END if (dpal == NULL) */

  if (type == tjstArray)
    {
    pa = get_array(preq->rq_ind.rq_status.rq_id);

    if (pa == NULL)
      {
      /* no such array -- reject rather than dereference a NULL pointer below */
      req_reject(PBSE_UNKARRAYID, 0, preq, NULL, "unable to find job array");

      return;
      }
    }

  if (!server.sv_attr[(int)SRV_ATR_PollJobs].at_val.at_long)
    {
    /* polljobs not set - indicates we may need to obtain fresh data from
       MOM */

    if (cntl->sc_jobid[0] == '\0')
      pjob = NULL;
    else
      pjob = find_job(cntl->sc_jobid);

    while (1)
      {
      if (pjob == NULL)
        {
        /* start from the first job */

        if (type == tjstJob)
          {
          pjob = find_job(preq->rq_ind.rq_status.rq_id);
          }
        else if (type == tjstQueue)
          {
          pjob = (job *)GET_NEXT(cntl->sc_pque->qu_jobs);
          }
        else if (type == tjstArray)
          {
          job_array_index = 0;
          /* increment job_array_index until we find a non-null pointer or hit the end */
          while (job_array_index < pa->ai_qs.array_size && (pjob = pa->jobs[job_array_index]) == NULL)
             job_array_index++;
         
          }
        else
          {
          if ((type == tjstTruncatedServer) || (type == tjstTruncatedQueue))
            IsTruncated = TRUE;

          pjob = (job *)GET_NEXT(svr_alljobs);
          }
        }    /* END if (pjob == NULL) */
      else
        {
        /* get next job */

        if (type == tjstJob)
          break;

        if (type == tjstQueue)
          pjob = (job *)GET_NEXT(pjob->ji_jobque);
        else
          pjob = (job *)GET_NEXT(pjob->ji_alljobs);
          
        if (type == tjstArray)
          {
          pjob = NULL;
          /* increment job_array_index until we find a non-null pointer or hit the end */
          while (++job_array_index < pa->ai_qs.array_size && (pjob = pa->jobs[job_array_index]) == NULL)
            ;
          }
        }

      if (pjob == NULL)
        break;

      /* PBS_RESTAT_JOB defaults to 30 seconds */

      if ((pjob->ji_qs.ji_substate == JOB_SUBSTATE_RUNNING) &&
          ((time_now - pjob->ji_momstat) > JobStatRate))
        {
        /* go to MOM for status */

        strcpy(cntl->sc_jobid, pjob->ji_qs.ji_jobid);

        if ((rc = stat_to_mom(pjob, cntl)) == PBSE_SYSTEM)
          {
          break;
          }

        if (rc != 0)
          {
          rc = 0;

          continue;
          }

        return; /* will pick up after mom replies */
        }
      }    /* END while(1) */

    if (cntl->sc_conn >= 0)
      svr_disconnect(cntl->sc_conn);  /* close connection to MOM */

    if (rc != 0)
      {
      free(cntl);

      reply_free(preply);

      req_reject(rc, 0, preq, NULL, "cannot get update from mom");

      return;
      }
    }    /* END if (!server.sv_attr[(int)SRV_ATR_PollJobs].at_val.at_long) */

  /*
   * now ready for part 3, building the status reply,
   * loop through again
   */

  if (type == tjstSummarizeArraysQueue || type == tjstSummarizeArraysServer)
    {
    update_array_statuses();
    }

  if (type == tjstJob)
    pjob = find_job(preq->rq_ind.rq_status.rq_id);
  else if (type == tjstQueue)
    pjob = (job *)GET_NEXT(cntl->sc_pque->qu_jobs);
  else if (type == tjstSummarizeArraysQueue)
    pjob = (job *)GET_NEXT(cntl->sc_pque->qu_jobs_array_sum);
  else if (type == tjstSummarizeArraysServer)
    pjob = (job *)GET_NEXT(svr_jobs_array_sum);
  else if (type == tjstArray)
    {
    job_array_index = 0;
    pjob = NULL;
    /* increment job_array_index until we find a non-null pointer or hit the end */
    while (job_array_index < pa->ai_qs.array_size && (pjob = pa->jobs[job_array_index]) == NULL)
        job_array_index++;
    }
  else
    pjob = (job *)GET_NEXT(svr_alljobs);

  DTime = 0;

  if (preq->rq_extend != NULL)
    {
    char *ptr;

    /* FORMAT:  { EXECQONLY | DELTA:<EPOCHTIME> } */

    if (strstr(preq->rq_extend, EXECQUEONLY))
      exec_only = 1;

    ptr = strstr(preq->rq_extend, "DELTA:");

    if (ptr != NULL)
      {
      ptr += strlen("delta:");

      DTime = strtol(ptr, NULL, 10);
      }
    }

  free(cntl);

  if ((type == tjstTruncatedServer) || (type == tjstTruncatedQueue))
    {
    long sentJobCounter;
    long qjcounter;
    long qmaxreport;

    /* loop through all queues */

    for (pque = (pbs_queue *)GET_NEXT(svr_queues);
         pque != NULL;
         pque = (pbs_queue *)GET_NEXT(pque->qu_link))
      {
      qjcounter = 0;

      if ((exec_only == 1) &&
          (pque->qu_qs.qu_type != QTYPE_Execution))
        {
        /* ignore routing queues */

        continue;
        }

      if (((pque->qu_attr[QA_ATR_MaxReport].at_flags & ATR_VFLAG_SET) != 0) &&
          (pque->qu_attr[QA_ATR_MaxReport].at_val.at_long >= 0))
        {
        qmaxreport = pque->qu_attr[QA_ATR_MaxReport].at_val.at_long;
        }
      else
        {
        qmaxreport = TMAX_JOB;
        }

      if (LOGLEVEL >= 5)
        {
        sprintf(log_buffer,"giving scheduler up to %ld idle jobs in queue %s\n",
          qmaxreport,
          pque->qu_qs.qu_name);

        log_event(
          PBSEVENT_SYSTEM,
          PBS_EVENTCLASS_QUEUE,
          pque->qu_qs.qu_name,
          log_buffer);
        }

      sentJobCounter = 0;

      /* loop through jobs in queue */

      for (pjob = (job *)GET_NEXT(pque->qu_jobs);
           pjob != NULL;
           pjob = (job *)GET_NEXT(pjob->ji_jobque))
        {
        if ((qjcounter >= qmaxreport) &&
            (pjob->ji_qs.ji_state == JOB_STATE_QUEUED))
          {
          /* max_report of queued jobs reached for queue */

          continue;
          }

        pal = (svrattrl *)GET_NEXT(preq->rq_ind.rq_status.rq_attr);

        rc = status_job(
               pjob,
               preq,
               (pjob->ji_wattr[(int)JOB_ATR_mtime].at_val.at_long >= DTime) ? pal : dpal,
               &preply->brp_un.brp_status,
               &bad);

        if ((rc != 0) && (rc != PBSE_PERM))
          {
          req_reject(rc, bad, preq, NULL, NULL);

          return;
          }

        sentJobCounter++;

        if (pjob->ji_qs.ji_state == JOB_STATE_QUEUED)
          qjcounter++;
        }    /* END for (pjob) */

      if (LOGLEVEL >= 5)
        {
        sprintf(log_buffer,"sent scheduler %ld total jobs for queue %s\n",
          sentJobCounter,
          pque->qu_qs.qu_name);

        log_event(
          PBSEVENT_SYSTEM,
          PBS_EVENTCLASS_QUEUE,
          pque->qu_qs.qu_name,
          log_buffer);
        }
      }      /* END for (pque) */

    reply_send(preq);

    return;
    }        /* END if ((type == tjstTruncatedServer) || ...) */

  while (pjob != NULL)
    {
    /* go ahead and build the status reply for this job */

    if (exec_only)
      {
      pque = find_queuebyname(pjob->ji_qs.ji_queue);

      /* skip the job if its queue cannot be found or is not an execution queue */
      if ((pque == NULL) || (pque->qu_qs.qu_type != QTYPE_Execution))
        goto nextjob;
      }

    pal = (svrattrl *)GET_NEXT(preq->rq_ind.rq_status.rq_attr);

    rc = status_job(
           pjob,
           preq,
           pal,
           &preply->brp_un.brp_status,
           &bad);

    if ((rc != 0) && (rc != PBSE_PERM))
      {
      req_reject(rc, bad, preq, NULL, NULL);

      return;
      }

    /* get next job */

nextjob:

    if (type == tjstJob)
      break;

    if (type == tjstQueue)
      pjob = (job *)GET_NEXT(pjob->ji_jobque);
    else if (type == tjstSummarizeArraysQueue)
      pjob = (job *)GET_NEXT(pjob->ji_jobque_array_sum);
    else if (type == tjstSummarizeArraysServer)
      pjob = (job *)GET_NEXT(pjob->ji_jobs_array_sum);
    else if (type == tjstArray)
      {
      pjob = NULL;
      /* increment job_array_index until we find a non-null pointer or hit the end */
      while (++job_array_index < pa->ai_qs.array_size && (pjob = pa->jobs[job_array_index]) == NULL)
        ;
      }
    else
      pjob = (job *)GET_NEXT(pjob->ji_alljobs);

    rc = 0;
    }  /* END while (pjob != NULL) */

 
  reply_send(preq);

  if (LOGLEVEL >= 7)
    {
    log_event(PBSEVENT_SYSTEM,
      PBS_EVENTCLASS_JOB,
      "req_statjob",
      "Successfully returned the status of queued jobs\n");
    }

  return;
  }  /* END req_stat_job_step2() */
Example no. 12
0
static int status_node(

  struct pbsnode       *pnode,    /* ptr to node receiving status query */
  struct batch_request *preq,
  tlist_head            *pstathd)  /* head of list to append status to  */

  {
  int       rc = 0;

  struct brp_status *pstat;
  svrattrl          *pal;

  if (pnode->nd_state & INUSE_DELETED)  /*node no longer valid*/
    {
    return(0);
    }

  if ((preq->rq_perm & ATR_DFLAG_RDACC) == 0)
    {
    return(PBSE_PERM);
    }

  /* allocate status sub-structure and fill in header portion */

  pstat = (struct brp_status *)malloc(sizeof(struct brp_status));

  if (pstat == NULL)
    {
    return(PBSE_SYSTEM);
    }

  memset(pstat, 0, sizeof(struct brp_status));

  pstat->brp_objtype = MGR_OBJ_NODE;

  strcpy(pstat->brp_objname, pnode->nd_name);

  CLEAR_LINK(pstat->brp_stlink);
  CLEAR_HEAD(pstat->brp_attr);

  /*add this new brp_status structure to the list hanging off*/
  /*the request's reply substructure                         */

  append_link(pstathd, &pstat->brp_stlink, pstat);

  /*point to the list of node-attributes about which we want status*/
  /*hang that status information from the brp_attr field for this  */
  /*brp_status structure                                           */

  bad = 0;                                    /*global variable*/

  if (preq->rq_ind.rq_status.rq_attr.ll_struct != NULL)
    pal = (svrattrl *)GET_NEXT(preq->rq_ind.rq_status.rq_attr);
  else
    pal = NULL;

  rc = status_nodeattrib(
         pal,
         node_attr_def,
         pnode,
         ND_ATR_LAST,
         preq->rq_perm,
         &pstat->brp_attr,
         &bad);

  return(rc);
  }  /* END status_node() */
Example no. 13
0
void req_stat_node(

  struct batch_request *preq) /* ptr to the decoded request   */

  {
  char    *name;

  struct pbsnode *pnode = NULL;

  struct batch_reply *preply;
  svrattrl *pal;
  int     rc   = 0;
  int     type = 0;
  int     i;

  struct prop props;


  char     *id = "req_stat_node";

  /*
   * first, check that the server indeed has a list of nodes
   * and if it does, validate the name of the requested object--
   * either name is that of a specific node, or name[0] is null/@
   * meaning request is for all nodes in the server's jurisdiction
   */

  if (LOGLEVEL >= 6)
    {
    log_record(
      PBSEVENT_SCHED,
      PBS_EVENTCLASS_REQUEST,
      id,
      "entered");
    }

  if ((pbsndmast == NULL) || (svr_totnodes <= 0))
    {
    req_reject(PBSE_NONODES, 0, preq, NULL, "node list is empty - check 'server_priv/nodes' file");

    return;
    }

  name = preq->rq_ind.rq_status.rq_id;

  if ((*name == '\0') || (*name == '@'))
    {
    type = 1;
    }
  else if ((*name == ':') && (*(name + 1) != '\0'))
    {
    if (!strcmp(name + 1, "ALL"))
      {
      type = 1;  /* pseudo-group for all nodes */
      }
    else
      {
      type = 2;
      props.name = name + 1;
      props.mark = 1;
      props.next = NULL;
      }
    }
  else
    {
    pnode = find_nodebyname(name);

    if (pnode == NULL)
      {
      req_reject(PBSE_UNKNODE, 0, preq, NULL, "cannot locate specified node");

      return;
      }
    }

  preply = &preq->rq_reply;

  preply->brp_choice = BATCH_REPLY_CHOICE_Status;

  CLEAR_HEAD(preply->brp_un.brp_status);

  if (type == 0)
    {
    /* get status of the named node */

    rc = status_node(pnode, preq, &preply->brp_un.brp_status);
    }
  else
    {
    /* get status of all or several nodes */

    for (i = 0;i < svr_totnodes;i++)
      {
      pnode = pbsndmast[i];

      if ((type == 2) && !hasprop(pnode, &props))
        continue;

      if ((rc = status_node(pnode, preq, &preply->brp_un.brp_status)) != 0)
        break;
      }
    }

  if (!rc)
    {
    /* SUCCESS */

    reply_send(preq);
    }
  else
    {
    if (rc != PBSE_UNKNODEATR)
      {
      req_reject(rc, 0, preq, NULL, NULL);
      }
    else
      {
      pal = (svrattrl *)GET_NEXT(preq->rq_ind.rq_status.rq_attr);

      reply_badattr(rc, bad, pal, preq);
      }
    }

  return;
  }  /* END req_stat_node() */
Example no. 14
0
static int status_que(

  pbs_queue            *pque,     /* ptr to que to status */
  struct batch_request *preq,
  tlist_head           *pstathd)  /* head of list to append status to */

  {

  struct brp_status *pstat;
  svrattrl          *pal;

  if ((preq->rq_perm & ATR_DFLAG_RDACC) == 0)
    {
    return(PBSE_PERM);
    }

  /* ok going to do status, update count and state counts from qu_qs */

  pque->qu_attr[(int)QA_ATR_TotalJobs].at_val.at_long = pque->qu_numjobs;

  pque->qu_attr[(int)QA_ATR_TotalJobs].at_flags |= ATR_VFLAG_SET;

  update_state_ct(
    &pque->qu_attr[(int)QA_ATR_JobsByState],
    pque->qu_njstate,
    pque->qu_jobstbuf);

  /* allocate status sub-structure and fill in header portion */

  pstat = (struct brp_status *)malloc(sizeof(struct brp_status));

  if (pstat == NULL)
    {
    return(PBSE_SYSTEM);
    }

  memset(pstat, 0, sizeof(struct brp_status));
  pstat->brp_objtype = MGR_OBJ_QUEUE;

  strcpy(pstat->brp_objname, pque->qu_qs.qu_name);

  CLEAR_LINK(pstat->brp_stlink);
  CLEAR_HEAD(pstat->brp_attr);

  append_link(pstathd, &pstat->brp_stlink, pstat);

  /* add attributes to the status reply */

  bad = 0;

  pal = (svrattrl *)GET_NEXT(preq->rq_ind.rq_status.rq_attr);

  if (status_attrib(
        pal,
        que_attr_def,
        pque->qu_attr,
        QA_ATR_LAST,
        preq->rq_perm,
        &pstat->brp_attr,
        &bad,
        1) != 0)   /* IsOwner == TRUE */
    {
    return(PBSE_NOATTR);
    }

  return(0);
  }  /* END status_que() */
Example no. 15
0
int attr_atomic_set(

  struct svrattrl *plist,
  pbs_attribute   *old,
  pbs_attribute   *new_attr,
  attribute_def   *pdef,
  int              limit,
  int              unkn,
  int              privil,
  int             *badattr)

  {
  int           acc;
  int           index;
  int           listidx;
  resource      *prc;
  int            rc;
  pbs_attribute  temp;
  int            resc_access_perm = privil; /* set privilege for decode_resc() */

  for (index = 0;index < limit;index++)
    clear_attr(new_attr + index, pdef + index);

  listidx = 0;

  rc = PBSE_NONE;

  while (plist != NULL)
    {
    listidx++;

    if ((index = find_attr(pdef, plist->al_name, limit)) < 0)
      {
      if (unkn < 0)
        {
        /*unknown attr isn't allowed*/
        rc =  PBSE_NOATTR;

        break;
        }
      else
        {
        index = unkn;  /* if unknown attr are allowed */
        }
      }

    /* have we privilege to set the pbs_attribute ? */

    acc = (pdef + index)->at_flags & ATR_DFLAG_ACCESS;

    if ((acc & privil & ATR_DFLAG_WRACC) == 0)
      {
      if (privil & ATR_DFLAG_SvWR)
        {
        /* from a daemon, just ignore this pbs_attribute */

        plist = GET_NEXT(plist->al_link);

        continue;
        }

      /* from user, error if can't write pbs_attribute */

      rc = PBSE_ATTRRO;

      break;
      }

    /* decode new_attr value */

    clear_attr(&temp, pdef + index);

    /* 
     * special gpu cases
     * 1) if only ncpus is specified, delete gpus resource if any
     * 2) if both ncpus and gpus specified, replace both
     */
    
    if ((strcmp(plist->al_name,ATTR_l) == 0) &&
      (strcmp(plist->al_resc,"ncpus") == 0))
      {
      char      *pc;
      if ((pc = strstr(plist->al_value,":gpus=")) != NULL)
        {
        /* save off gpu resource list then add new resource_list entry for it */
        char *gpuval;

        gpuval = strdup(pc+6);

        (*pc) = '\0';

        if (gpuval != NULL)
          {
          rc = (pdef + index)->at_decode(&temp, plist->al_name, "gpus",
              gpuval,ATR_DFLAG_ACCESS);

          free(gpuval);
          if (rc != 0)
            {
            if ((rc == PBSE_UNKRESC) && (unkn > 0))
              rc = 0; /* ignore the "error" */
            else
              break;
            }
          }
        }
      else
        {
        /* delete old resource_list.gpus value if any.
         * this can be done by setting it to zero
         */
        rc = (pdef + index)->at_decode(&temp, plist->al_name, "gpus",
            0,ATR_DFLAG_ACCESS);
        if (rc != 0)
          {
          if ((rc == PBSE_UNKRESC) && (unkn > 0))
            rc = 0; /* ignore the "error" */
          else
            break;
          }
        }
      }

    rc = (pdef + index)->at_decode(&temp, plist->al_name, plist->al_resc, plist->al_value,resc_access_perm);
    if (rc != 0)
      {
      if ((rc == PBSE_UNKRESC) && (unkn > 0))
        rc = 0; /* ignore the "error" */
      else
        break;
      }

    /* duplicate current value, if set AND not already dup-ed */

    if (((old + index)->at_flags & ATR_VFLAG_SET) &&
        !((new_attr + index)->at_flags & ATR_VFLAG_SET))
      {
      if ((rc = (pdef + index)->at_set(new_attr + index, old + index, SET)) != 0)
        break;

      /*
       * we need to know if the value is changed during
       * the next step, so clear MODIFY here; including
       * within resources.
       */

      (new_attr + index)->at_flags &= ~ATR_VFLAG_MODIFY;

      if ((new_attr + index)->at_type == ATR_TYPE_RESC)
        {
        prc = (resource *)GET_NEXT((new_attr + index)->at_val.at_list);

        while (prc)
          {
          prc->rs_value.at_flags &= ~ATR_VFLAG_MODIFY;
          prc = (resource *)GET_NEXT(prc->rs_link);
          }
        }
      }

    /* update new copy with temp, MODIFY is set on ones changed */

    if ((plist->al_op != INCR) && (plist->al_op != DECR) &&
        (plist->al_op != SET) && (plist->al_op != INCR_OLD))
      {
      plist->al_op = SET;
      }

    if (temp.at_flags & ATR_VFLAG_SET)
      {
      if ((rc = (pdef + index)->at_set(new_attr + index, &temp, plist->al_op)) != 0)
        {
        (pdef + index)->at_free(&temp);

        break;
        }
      }
    else if (temp.at_flags & ATR_VFLAG_MODIFY)
      {
      (pdef + index)->at_free(new_attr + index);

      (new_attr + index)->at_flags |= ATR_VFLAG_MODIFY;
      }

    (pdef + index)->at_free(&temp);

    if (plist->al_link.ll_next == NULL)
      break;

    plist = (struct svrattrl *)GET_NEXT(plist->al_link);
    } /* END while (plist != NULL) */

  if (rc != 0)
    {
    *badattr = listidx;

    for (index = 0; index < limit; index++)
      (pdef + index)->at_free(new_attr + index);

    return(rc);
    }

  return(0);
  }  /* END attr_atomic_set() */
Example no. 16
0
int add_encoded_attributes(

  xmlNodePtr     *attr_node, /* M attribute node */ 
  pbs_attribute  *pattr)     /* M ptr to pbs_attribute value array */

  {
  tlist_head  lhead;
  int         i;
  int         resc_access_perm = ATR_DFLAG_ACCESS;
  svrattrl   *pal;
  int         rc = PBSE_NONE;
  xmlNodePtr  attributeNode = *attr_node;
  char        buf[BUFSIZE];
  xmlNodePtr  pal_xmlNode;

  CLEAR_HEAD(lhead);
  xmlNodePtr  resource_list_head_node = NULL;
  xmlNodePtr  resource_used_head_node = NULL;
  xmlNodePtr  complete_req_head_node = NULL;

  for (i = 0; ((i < JOB_ATR_LAST) && (rc >= 0)); i++)
    {
    if ((job_attr_def[i].at_type != ATR_TYPE_ACL) &&
        ((pattr + i)->at_flags & ATR_VFLAG_SET))
      {
      if ((i != JOB_ATR_resource) &&
          (i != JOB_ATR_resc_used) &&
          (i != JOB_ATR_req_information))
        {
        std::string value;

#ifndef PBS_MOM
        if (i == JOB_ATR_depend)
          translate_dependency_to_string(pattr + i, value);
        else
#endif
          attr_to_str(value, job_attr_def + i, pattr[i], true);

        if (value.size() == 0)
          continue;

        pal_xmlNode = xmlNewChild(attributeNode,
                                  NULL,
                                  (xmlChar *)job_attr_def[i].at_name,
                                  (const xmlChar *)value.c_str());

        if (pal_xmlNode)
          {
          snprintf(buf, sizeof(buf), "%u", (unsigned int)pattr[i].at_flags);
          xmlSetProp(pal_xmlNode, (const xmlChar *)AL_FLAGS_ATTR, (const xmlChar *)buf);
          (pattr + i)->at_flags &= ~ATR_VFLAG_MODIFY;
          }
        }
      else
        {
        rc = job_attr_def[i].at_encode(pattr + i,
            &lhead,
            job_attr_def[i].at_name,
            NULL,
            ATR_ENCODE_SAVE,
            resc_access_perm);
        
        if (rc < 0)
          return -1;

        (pattr + i)->at_flags &= ~ATR_VFLAG_MODIFY;

        while ((pal = (svrattrl *)GET_NEXT(lhead)) != NULL)
          {
          if (i == JOB_ATR_resource)
            {
            pal_xmlNode = add_resource_list_attribute(ATTR_l,
                                                      attr_node,
                                                      &resource_list_head_node,
                                                      pal);
            }
          else if (i == JOB_ATR_req_information)
            {
            pal_xmlNode = add_resource_list_attribute(ATTR_req_information,
                                                      attr_node,
                                                      &complete_req_head_node,
                                                      pal);
            }
          else
            {
            pal_xmlNode = add_resource_list_attribute(ATTR_used,
                                                      attr_node,
                                                      &resource_used_head_node,
                                                      pal);
            }

          if (pal_xmlNode)
            {
            snprintf(buf, sizeof(buf), "%u", (unsigned int)pal->al_flags);
            xmlSetProp(pal_xmlNode, (const xmlChar *)AL_FLAGS_ATTR, (const xmlChar *)buf);
            }

          delete_link(&pal->al_link);
          free(pal);

          if (!pal_xmlNode)
            rc = -1;
          }
        }
      }
    }

  return (0);
  } /* END add_encoded_attributes */
Example no. 17
0
int attr_atomic_node_set(

  struct svrattrl *plist,    /* list of pbs_attribute modif structs */
  pbs_attribute   *old,      /* unused */
  pbs_attribute   *new_attr,      /* new pbs_attribute array begins here */
  attribute_def   *pdef,     /* begin array  definition structs */
  int              limit,    /* number elts in definition array */
  int              unkn,     /* <0 unknown attrib not permitted */
  int              privil,   /* requester's access privileges   */
  int             *badattr)  /* return list position where bad  */

  {
  int           acc;
  int           index;
  int           listidx;
  int           rc = 0;
  pbs_attribute temp;

  listidx = 0;

  while (plist)
    {
    /*Traverse loop for each client entered pbs_attribute*/

    listidx++;

    if ((index = find_attr(pdef, plist->al_name, limit)) < 0)
      {
      if (unkn < 0)         /*if unknown attr not allowed*/
        {
        rc =  PBSE_NOATTR;
        break;
        }
      else
        index = unkn;  /*if unknown attr are allowed*/
      }


    /* The name of the pbs_attribute is in the definitions list*/
    /* Now, have we privilege to set the pbs_attribute ?       */
    /* Check access capabilities specified in the attrdef  */
    /* against the requestor's privilege level        */

    acc = (pdef + index)->at_flags & ATR_DFLAG_ACCESS;

    if ((acc & privil & ATR_DFLAG_WRACC) == 0)
      {
      if (privil & ATR_DFLAG_SvWR)
        {
        /*  from a daemon, just ignore this pbs_attribute */
        plist = (struct svrattrl *)GET_NEXT(plist->al_link);
        continue;
        }
      else
        {
        /*from user, no write access to pbs_attribute     */
        rc = PBSE_ATTRRO;
        break;
        }
      }

    /*decode new value*/

    clear_attr(&temp, pdef + index);

    if ((rc = (pdef + index)->at_decode(&temp, plist->al_name,
                                        plist->al_resc, plist->al_value, 0)) != 0)
      {
      if ((rc == PBSE_UNKRESC) && (unkn > 0))
        rc = 0;              /*ignore the "error"*/
      else
        break;
      }

    /*update "new" with "temp", MODIFY is set on "new" if changed*/

    (new_attr + index)->at_flags &= ~ATR_VFLAG_MODIFY;

    if ((plist->al_op != INCR) && (plist->al_op != DECR) &&
        (plist->al_op != SET) && (plist->al_op != INCR_OLD))
      plist->al_op = SET;


    if (temp.at_flags & ATR_VFLAG_SET)
      {
      /* "temp" has a data value, use it to update "new" */

      if ((rc = (pdef + index)->at_set(new_attr + index, &temp, plist->al_op)) != 0)
        {
        (pdef + index)->at_free(&temp);
        break;
        }
      }
    else if (temp.at_flags & ATR_VFLAG_MODIFY)
      {

      (pdef + index)->at_free(new_attr + index);
      (new_attr + index)->at_flags |= ATR_VFLAG_MODIFY;
      }

    (pdef + index)->at_free(&temp);
    plist = (struct svrattrl *)GET_NEXT(plist->al_link);
    }

  if (rc != 0)
    {

    /*"at_free" functions get invoked by upstream caller*/
    /*invoking attr_atomic_kill() on the array of       */
    /*node-pbs_attribute structs-- any hanging structs are  */
    /*freed and then the node-pbs_attribute array is freed  */

    *badattr = listidx;   /*the svrattrl that gave a problem*/
    }

  return (rc);
  }
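
A hedged sketch of driving attr_atomic_node_set() with a one-entry svrattrl list, built with the same attrlist_create()/append_link()/free_attrlist() pattern seen in these examples (surrounding TORQUE headers assumed). The definition table, the "np" attribute name, and the value are placeholders for whatever the real caller uses; ATR_DFLAG_WRACC is passed as the requester privilege so the write-access check succeeds.

/* hypothetical caller: atomically apply a single attribute change */
int set_one_node_attribute_sketch(

  pbs_attribute *new_attr,  /* attribute array sized to match pdef/limit */
  attribute_def *pdef,      /* definition table (placeholder) */
  int            limit)     /* number of entries in pdef */

  {
  tlist_head  lhead;
  svrattrl   *pal;
  int         bad = 0;
  int         rc;
  const char *val = "4";

  CLEAR_HEAD(lhead);

  /* one list entry: name "np", no resource, value "4" */
  if ((pal = attrlist_create("np", NULL, strlen(val) + 1)) == NULL)
    return(PBSE_MEM_MALLOC);

  strcpy(pal->al_value, val);
  pal->al_op = SET;
  append_link(&lhead, &pal->al_link, pal);

  /* the "old" array is unused by attr_atomic_node_set(); -1 forbids unknown attributes */
  rc = attr_atomic_node_set((svrattrl *)GET_NEXT(lhead), NULL, new_attr,
                            pdef, limit, -1, ATR_DFLAG_WRACC, &bad);

  free_attrlist(&lhead);   /* the decoded values now live in new_attr */

  return(rc);
  }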
Esempio n. 18
0
int add_walltime_remaining(
   
  int             index,
  pbs_attribute  *pattr,
  tlist_head     *phead)

  {
  int            len = 0;
  char           buf[MAXPATHLEN+1];
  char          *pname;
  svrattrl      *pal;
  resource      *pres;
  
  int            found = 0;
  long  remaining = 0;
  long  upperBound = 0;
  time_t         time_now   = time(NULL);

  /* encode walltime remaining, this is custom because walltime 
   * remaining isn't a pbs_attribute */
  if ((pattr + JOB_ATR_state)->at_val.at_char != 'R')
    {
    /* only for running jobs, do nothing */
    return(PBSE_NONE);
    }
  
  if (((pattr + JOB_ATR_resource)->at_val.at_list.ll_next != NULL) &&
      ((pattr + JOB_ATR_resource)->at_flags & ATR_VFLAG_SET))
    {
    pres = (resource *)GET_NEXT((pattr + JOB_ATR_resource)->at_val.at_list);
    
    if ((pattr + JOB_ATR_comp_time)->at_flags & ATR_VFLAG_SET)
      upperBound = (pattr + JOB_ATR_comp_time)->at_val.at_long;
    else
      upperBound = time_now;
    
    /* find the walltime resource */
    for (;pres != NULL;pres = (resource *)GET_NEXT(pres->rs_link))
      {
      pname = pres->rs_defin->rs_name;
      
      if (strcmp(pname, "walltime") == 0)
        {
        /* found walltime */
        long value = pres->rs_value.at_val.at_long;
        remaining = value - (time_now - (pattr + index)->at_val.at_long);
        found = TRUE;
        break;
        }
      }
    }
  
  if (found == TRUE)
    {
    snprintf(buf,MAXPATHLEN,"%ld",remaining);
    
    len = strlen(buf);
    pal = attrlist_create("Walltime","Remaining",len+1);
    
    if (pal != NULL)
      {
      memcpy(pal->al_value,buf,len);
      pal->al_flags = ATR_VFLAG_SET;
      append_link(phead,&pal->al_link,pal);
      }
    }

  return(PBSE_NONE);
  } /* END add_walltime_remaining() */
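
For reference, the value encoded above is simply the requested walltime minus the elapsed run time. A worked example with illustrative numbers (walltime resource of 3600 seconds, job started 1000 seconds ago and still running):

  /* illustrative numbers only */
  time_t time_now   = time(NULL);
  long   walltime   = 3600;                                      /* walltime resource, seconds */
  long   start_time = (long)time_now - 1000;                     /* JOB_ATR_start_time value   */
  long   remaining  = walltime - ((long)time_now - start_time);  /* = 2600 seconds left        */
  /* encoded into the reply via attrlist_create("Walltime", "Remaining", ...) as "2600" */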
Esempio n. 19
0
/*
 * delete_array_range()
 *
 * deletes a range from a specific array
 *
 * @param pa - the array whose jobs are deleted
 * @param range_str - the user-given range to delete 
 * @return - the number of jobs skipped, -1 if range error 
 */
int delete_array_range(

  job_array *pa,
  char      *range_str)

  {
  tlist_head          tl;
  array_request_node *rn;
  array_request_node *to_free;
  job                *pjob;
  char               *range;

  int                 i;
  int                 num_skipped = 0;
  int                 num_deleted = 0;
  int                 deleted;
  int                 running;

  /* get just the numeric range specified, '=' should
   * always be there since we put it there in qdel */
  if((range = strchr(range_str,'=')) == NULL)
    return(-1);
  range++; /* move past the '=' */

  CLEAR_HEAD(tl);
  if (parse_array_request(range,&tl) > 0)
    {
    /* don't delete jobs if range error */

    return(-1);
    }

  rn = (array_request_node*)GET_NEXT(tl);

  while (rn != NULL)
    {
    for (i = rn->start; i <= rn->end; i++)
      {
      /* don't stomp on other memory */
      if (i >= pa->ai_qs.array_size)
        continue;

      if (pa->job_ids[i] == NULL)
        continue;

      if ((pjob = svr_find_job(pa->job_ids[i], FALSE)) == NULL)
        {
        free(pa->job_ids[i]);
        pa->job_ids[i] = NULL;
        }
      else
        {
        if (pjob->ji_qs.ji_state >= JOB_STATE_EXITING)
          {
          /* invalid state for request,  skip */
          unlock_ji_mutex(pjob, __func__, "1", LOGLEVEL);
          continue;
          }

        running = (pjob->ji_qs.ji_state == JOB_STATE_RUNNING);

        pthread_mutex_unlock(pa->ai_mutex);
        deleted = attempt_delete(pjob);

        if (deleted == FALSE)
          {
          /* if the job was deleted, this mutex would be taken care of elsewhere. When
           * the delete fails, release it here */
          unlock_ji_mutex(pjob, __func__, "1", LOGLEVEL);

          num_skipped++;
          }
        else if (running == FALSE)
          {
          /* running jobs will increase the deleted count when their obit is reported */
          num_deleted++;
          }

        pthread_mutex_lock(pa->ai_mutex);
        }
      }

    to_free = rn;
    rn = (array_request_node*)GET_NEXT(rn->request_tokens_link);

    /* release mem */
    free(to_free);
    }

  pa->ai_qs.num_deleted += num_deleted;

  return(num_skipped);
  } /* END delete_array_range() */
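
A minimal sketch of a caller, under the assumption that the array mutex is already held as delete_array_range() expects. The "array_range=" prefix mirrors the '=' the function strips from the qdel-supplied string; the wrapper and the logging are illustrative.

/* hypothetical caller: delete a slice of an array job's sub-jobs */
void delete_some_subjobs_sketch(

  job_array *pa)  /* I/O - pa->ai_mutex assumed held, as in the other array helpers */

  {
  char range_str[] = "array_range=1-5,12";  /* qdel-style range, '=' required */
  int  skipped;

  if ((skipped = delete_array_range(pa, range_str)) < 0)
    log_err(-1, __func__, (char *)"bad array range");
  else if (skipped > 0)
    log_err(-1, __func__, (char *)"some sub-jobs were in the wrong state and were skipped");
  }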
Esempio n. 20
0
int status_attrib(

  svrattrl      *pal,      /* I */
  attribute_def *padef,
  pbs_attribute *pattr,
  int            limit,
  int            priv,
  tlist_head    *phead,
  int           *bad,
  int            IsOwner)  /* 0 == FALSE, 1 == TRUE */

  {
  int    index;
  int    nth = 0;
  int    resc_access_perm;

  priv &= ATR_DFLAG_RDACC;  /* user-client privilege  */
  resc_access_perm = priv; 

  /* for each pbs_attribute asked for or for all attributes, add to reply */

  if (pal != NULL)
    {
    /* client specified certain attributes */

    while (pal != NULL)
      {
      ++nth;

      index = find_attr(padef, pal->al_name, limit);

      if (index < 0)
        {
        *bad = nth;

        /* FAILURE */
        return(-1);
        }

      if ((padef + index)->at_flags & priv)
        {
        if (!(((padef + index)->at_flags & ATR_DFLAG_PRIVR) && (IsOwner == 0)))
          {
          (padef + index)->at_encode(
            pattr + index,
            phead,
            (padef + index)->at_name,
            NULL,
            ATR_ENCODE_CLIENT,
            resc_access_perm);
          }
        }

      pal = (svrattrl *)GET_NEXT(pal->al_link);
      }

    /* SUCCESS */
    return(PBSE_NONE);
    }    /* END if (pal != NULL) */

  /* attrlist not specified, return all readable attributes */

  for (index = 0;index < limit;index++)
    {
    if (((padef + index)->at_flags & priv) &&
        !((padef + index)->at_flags & ATR_DFLAG_NOSTAT))
      {
      if (!(((padef + index)->at_flags & ATR_DFLAG_PRIVR) && (IsOwner == 0)))
        {
        (padef + index)->at_encode(
          pattr + index,
          phead,
          (padef + index)->at_name,
          NULL,
          ATR_ENCODE_CLIENT,
          resc_access_perm);

        /* add walltime remaining if started */
        if ((index == JOB_ATR_start_time) &&
            ((pattr + index)->at_flags & ATR_VFLAG_SET))
          add_walltime_remaining(index, pattr, phead);
        }
      }
    }    /* END for (index) */

  /* SUCCESS */
  return(PBSE_NONE);
  }  /* END status_attrib() */
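
A hedged sketch of assembling a status reply with status_attrib(): passing NULL for the pal list asks for every readable attribute, and the encoded entries are then walked with GET_NEXT(). job_attr_def and JOB_ATR_LAST are the job definition table and limit used elsewhere in these examples; the read-only privilege value and the wrapper are illustrative.

/* hypothetical caller: encode every readable job attribute, then walk the reply list */
void status_all_job_attributes_sketch(

  pbs_attribute *pattr,     /* the job's attribute array */
  int            is_owner)  /* 0 == FALSE, 1 == TRUE */

  {
  tlist_head  lhead;
  svrattrl   *pal;
  int         bad = 0;

  CLEAR_HEAD(lhead);

  if (status_attrib(NULL, job_attr_def, pattr, JOB_ATR_LAST,
                    ATR_DFLAG_RDACC, &lhead, &bad, is_owner) == PBSE_NONE)
    {
    for (pal = (svrattrl *)GET_NEXT(lhead);
         pal != NULL;
         pal = (svrattrl *)GET_NEXT(pal->al_link))
      {
      /* consume pal->al_name / pal->al_resc / pal->al_value here */
      }
    }

  free_attrlist(&lhead);
  }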
Esempio n. 21
0
int modify_array_range(

  job_array *pa,              /* I/O */
  char      *range,           /* I */
  svrattrl  *plist,           /* I */
  struct batch_request *preq, /* I */
  int        checkpoint_req)  /* I */

  {
  char                log_buf[LOCAL_LOG_BUF_SIZE];
  tlist_head          tl;
  int                 i;
  int                 rc;
  int                 mom_relay = 0;
  job                *pjob;

  array_request_node *rn;
  array_request_node *to_free;
  
  CLEAR_HEAD(tl);
  
  if (parse_array_request(range,&tl) > 0)
    {
    /* don't hold the jobs if range error */
    
    return(FAILURE);
    }
  else 
    {
    /* hold just that range from the array */
    rn = (array_request_node*)GET_NEXT(tl);
    
    while (rn != NULL)
      {
      for (i = rn->start; i <= rn->end; i++)
        {
        if ((i >= pa->ai_qs.array_size) ||
            (pa->job_ids[i] == NULL))
          continue;

        if ((pjob = svr_find_job(pa->job_ids[i], FALSE)) == NULL)
          {
          free(pa->job_ids[i]);
          pa->job_ids[i] = NULL;
          }
        else
          {
          pthread_mutex_unlock(pa->ai_mutex);
          rc = modify_job((void **)&pjob, plist, preq, checkpoint_req, NO_MOM_RELAY);
          pa = get_jobs_array(&pjob);
          
          if (pjob != NULL)
            {
            if (rc == PBSE_RELAYED_TO_MOM)
              {
              struct batch_request *array_req = NULL;
              
              /* We told modify_job not to call relay_to_mom so we need to contact the mom */
              if ((rc = copy_batchrequest(&array_req, preq, 0, i)) != PBSE_NONE)
                {
                return(rc);
                }
              
              preq->rq_refcount++;
              if (mom_relay == 0)
                {
                preq->rq_refcount++;
                }
              mom_relay++;
              
              /* The array_req is freed in relay_to_mom (failure)
               * or in issue_Drequest (success) */
              
              if ((rc = relay_to_mom(&pjob, array_req, NULL)))
                {
                snprintf(log_buf,sizeof(log_buf),
                  "Unable to relay information to mom for job '%s'\n",
                  pjob->ji_qs.ji_jobid);
                log_err(rc, __func__, log_buf);
                
                unlock_ji_mutex(pjob, __func__, "1", LOGLEVEL);
                
                return(rc); /* unable to get to MOM */
                }
              else
                {
                unlock_ji_mutex(pjob, __func__, "2", LOGLEVEL);
                post_modify_arrayreq(array_req);
                }
              }
            else
              unlock_ji_mutex(pjob, __func__, "2", LOGLEVEL);
            }
          else
            pa->job_ids[i] = NULL;

          }
        }
      
      /* release mem */
      to_free = rn;
      rn = (array_request_node*)GET_NEXT(rn->request_tokens_link);
      free(to_free);
      }
    }

  if (mom_relay)
    {
    preq->rq_refcount--;
    if (preq->rq_refcount == 0)
      {
      free_br(preq);
      }
    return(PBSE_RELAYED_TO_MOM);
    }

  return(PBSE_NONE);
  } /* END modify_array_range() */
Esempio n. 22
0
void mom_job_purge(

  job *pjob)  /* I (modified) */

  {
  job_file_delete_info *jfdi;

  jfdi = (job_file_delete_info *)calloc(1, sizeof(job_file_delete_info));

  if (jfdi == NULL)
    {
    log_err(ENOMEM,__func__, (char *)"No space to allocate info for job file deletion");
    return;
    }

#ifdef NVIDIA_GPUS
  /*
   * Did this job have a gpuid assigned?
   * if so, then update gpu status
   */
  if (((pjob->ji_wattr[JOB_ATR_exec_gpus].at_flags & ATR_VFLAG_SET) != 0) &&
      (pjob->ji_wattr[JOB_ATR_exec_gpus].at_val.at_str != NULL))
    {
    send_update_soon();
    }
#endif  /* NVIDIA_GPUS */

  /* initialize struct information */
  if (pjob->ji_flags & MOM_HAS_TMPDIR)
    {
    jfdi->has_temp_dir = TRUE;
    pjob->ji_flags &= ~MOM_HAS_TMPDIR;
    }
  else
    jfdi->has_temp_dir = FALSE;

  strcpy(jfdi->jobid,pjob->ji_qs.ji_jobid);
  strcpy(jfdi->prefix,pjob->ji_qs.ji_fileprefix);

  if ((pjob->ji_wattr[JOB_ATR_checkpoint_dir].at_flags & ATR_VFLAG_SET) &&
      (pjob->ji_wattr[JOB_ATR_checkpoint_name].at_flags & ATR_VFLAG_SET))
    jfdi->checkpoint_dir = strdup(pjob->ji_wattr[JOB_ATR_checkpoint_dir].at_val.at_str);

  jfdi->gid = pjob->ji_qs.ji_un.ji_momt.ji_exgid;
  jfdi->uid = pjob->ji_qs.ji_un.ji_momt.ji_exuid;

  if (thread_unlink_calls == TRUE)
    enqueue_threadpool_request(delete_job_files,jfdi);
  else
    delete_job_files(jfdi);

  /* remove this job from the global queue */
  delete_link(&pjob->ji_jobque);
  delete_link(&pjob->ji_alljobs);

  if (LOGLEVEL >= 6)
    {
    sprintf(log_buffer,"removing job");

    log_record(PBSEVENT_DEBUG, PBS_EVENTCLASS_JOB, pjob->ji_qs.ji_jobid, log_buffer);
    }

#if IBM_SP2==2        /* IBM SP PSSP 3.1 */
  unload_sp_switch(pjob);

#endif   /* IBM SP */

  mom_job_free(pjob);

  /* if no jobs are left, check if MOM should be restarted */

  if (((job *)GET_NEXT(svr_alljobs)) == NULL)
    MOMCheckRestart();

  return;
  }  /* END mom_job_purge() */
Esempio n. 23
0
/* delete a job array struct from memory and disk. This is used when the number
 *  of jobs that belong to the array becomes zero.
 *  returns zero if there are no errors, non-zero otherwise
 */
int array_delete(
    
  job_array *pa)

  {
  int                      i;
  char                     path[MAXPATHLEN + 1];
  char                     log_buf[LOCAL_LOG_BUF_SIZE];
  array_request_node      *rn;
  struct array_depend     *pdep;
  struct array_depend_job *pdj;

  /* first thing to do is take this out of the servers list of all arrays */
  remove_array(pa);

  /* unlock the mutex and free it */
  unlock_ai_mutex(pa, __func__, "1", LOGLEVEL);
  free(pa->ai_mutex);

  /* delete the on disk copy of the struct */
  snprintf(path, sizeof(path), "%s%s%s",
    path_arrays, pa->ai_qs.fileprefix, ARRAY_FILE_SUFFIX);

  if (unlink(path))
    {
    sprintf(log_buf, "unable to delete %s", path);
    log_err(errno, "array_delete", log_buf);
    }

  /* clear array request linked list */
  for (rn = (array_request_node *)GET_NEXT(pa->request_tokens);
       rn != NULL;
       rn = (array_request_node *)GET_NEXT(pa->request_tokens))
    {
    delete_link(&rn->request_tokens_link);
    free(rn);
    }

  /* free the memory for the job pointers */
  for (i = 0; i < pa->ai_qs.array_size; i++)
    {
    if (pa->job_ids[i] != NULL)
      free(pa->job_ids[i]);
    }

  free(pa->job_ids);

  /* free the dependencies, if any */
  for (pdep = (struct array_depend *)GET_NEXT(pa->ai_qs.deps); 
       pdep != NULL;
       pdep = (struct array_depend *)GET_NEXT(pa->ai_qs.deps))
    {
    delete_link(&pdep->dp_link);

    for (pdj = (struct array_depend_job *)GET_NEXT(pdep->dp_jobs);
         pdj != NULL;
         pdj = (struct array_depend_job *)GET_NEXT(pdep->dp_jobs))
      {
      delete_link(&pdj->dc_link);
      free(pdj);
      }

    free(pdep);
    }

  /* purge the "template" job, 
     this also deletes the shared script file for the array*/
  if (pa->ai_qs.parent_id[0] != '\0')
    {
    job *pjob;
    if ((pjob = svr_find_job(pa->ai_qs.parent_id, FALSE)) != NULL)
      svr_job_purge(pjob);
    }

  /* free the memory allocated for the struct */
  free(pa);

  return(PBSE_NONE);
  } /* END array_delete() */
Esempio n. 24
0
void hud_scrollback_do_frame(float frametime)
{
	int i, k, x, y;
	int font_height = gr_get_font_height();

	k = Ui_window.process();
	switch (k) {
		case KEY_RIGHT:
		case KEY_TAB:
			if (Scrollback_mode == SCROLLBACK_MODE_OBJECTIVES) {
				Scrollback_mode = SCROLLBACK_MODE_MSGS_LOG;
				Scroll_max = hud_query_scrollback_size();
				hud_scroll_reset();

			} else if (Scrollback_mode == SCROLLBACK_MODE_MSGS_LOG) {
				Scrollback_mode = SCROLLBACK_MODE_EVENT_LOG;
				Scroll_max = Num_log_lines * gr_get_font_height();
				hud_scroll_reset();

			} else {
				Scrollback_mode = SCROLLBACK_MODE_OBJECTIVES;
				Scroll_max = Num_obj_lines * gr_get_font_height();
				Scroll_offset = 0;
			}

			break;

		case KEY_LEFT:
		case KEY_SHIFTED | KEY_TAB:
			if (Scrollback_mode == SCROLLBACK_MODE_OBJECTIVES) {
				Scrollback_mode = SCROLLBACK_MODE_EVENT_LOG;
				Scroll_max = Num_log_lines * gr_get_font_height();
				hud_scroll_reset();

			} else if (Scrollback_mode == SCROLLBACK_MODE_MSGS_LOG) {
				Scrollback_mode = SCROLLBACK_MODE_OBJECTIVES;
				Scroll_max = Num_obj_lines * gr_get_font_height();
				Scroll_offset = 0;

			} else {
				Scrollback_mode = SCROLLBACK_MODE_MSGS_LOG;
				Scroll_max = hud_query_scrollback_size();
				hud_scroll_reset();
			}

			break;

		case KEY_PAGEUP:
			hud_page_scroll_list(1);
			break;

		case KEY_PAGEDOWN:
			hud_page_scroll_list(0);
			break;

		case KEY_ENTER:
		case KEY_CTRLED | KEY_ENTER:
		case KEY_ESC:			
			hud_scrollback_exit();
			break;

		case KEY_F1:  // show help overlay
			break;

		case KEY_F2:  // goto options screen
			gameseq_post_event(GS_EVENT_OPTIONS_MENU);
			break;
	}	// end switch

	for (i=0; i<NUM_BUTTONS; i++){
		if (Buttons[gr_screen.res][i].button.pressed()){
			hud_scrollback_button_pressed(i);		
		}
	}

	GR_MAYBE_CLEAR_RES(Background_bitmap);
	if (Background_bitmap >= 0) {
		gr_set_bitmap(Background_bitmap, GR_ALPHABLEND_NONE, GR_BITBLT_MODE_NORMAL, 1.0f, -1, -1);
		gr_bitmap(0, 0);
	}

#ifdef MAKE_FS1
	if ((Scrollback_mode == SCROLLBACK_MODE_OBJECTIVES) && (Status_bitmap >= 0)) {
		gr_set_bitmap(Status_bitmap, GR_ALPHABLEND_NONE, GR_BITBLT_MODE_NORMAL, 1.0f, -1, -1);
		gr_bitmap(Hud_mission_log_status_coords[gr_screen.res][0], Hud_mission_log_status_coords[gr_screen.res][1]);
	}
#endif

	// draw the objectives key at the bottom of the ingame objectives screen
	if (Scrollback_mode == SCROLLBACK_MODE_OBJECTIVES) {
		ML_render_objectives_key();
	}

	Ui_window.draw();

	if (Scrollback_mode == SCROLLBACK_MODE_EVENT_LOG) {
		Buttons[gr_screen.res][SHOW_EVENTS_BUTTON].button.draw_forced(2);
		mission_log_scrollback(Scroll_offset, Hud_mission_log_list_coords[gr_screen.res][0], Hud_mission_log_list_coords[gr_screen.res][1], Hud_mission_log_list_coords[gr_screen.res][2], Hud_mission_log_list_coords[gr_screen.res][3]);

	} else if (Scrollback_mode == SCROLLBACK_MODE_OBJECTIVES) {
		Buttons[gr_screen.res][SHOW_OBJS_BUTTON].button.draw_forced(2);
		ML_objectives_do_frame(Scroll_offset);

	} else {
		line_node *node_ptr;

		Buttons[gr_screen.res][SHOW_MSGS_BUTTON].button.draw_forced(2);
//		y = ((LIST_H / font_height) - 1) * font_height;
		y = 0;
		if ( !EMPTY(&Msg_scrollback_used_list) && HUD_msg_inited ) {
			node_ptr = GET_FIRST(&Msg_scrollback_used_list);
			i = 0;
			while ( node_ptr != END_OF_LIST(&Msg_scrollback_used_list) ) {
				if ((node_ptr->source == HUD_SOURCE_HIDDEN) || (i++ < Scroll_offset)) {
					node_ptr = GET_NEXT(node_ptr);

				} else {
					switch (node_ptr->source) {
						case HUD_SOURCE_FRIENDLY:
							SET_COLOR_FRIENDLY;
							break;

						case HUD_SOURCE_HOSTILE:
							SET_COLOR_HOSTILE;
							break;

						case HUD_SOURCE_NEUTRAL:
							SET_COLOR_NEUTRAL;
							break;

						case HUD_SOURCE_UNKNOWN:
							SET_COLOR_UNKNOWN;
							break;

						case HUD_SOURCE_TRAINING:
							gr_set_color_fast(&Color_bright_blue);
							break;

						case HUD_SOURCE_TERRAN_CMD:
							gr_set_color_fast(&Color_bright_white);
							break;

						case HUD_SOURCE_IMPORTANT:
						case HUD_SOURCE_FAILED:
						case HUD_SOURCE_SATISFIED:
							gr_set_color_fast(&Color_bright_white);
							break;

						default:
							gr_set_color_fast(&Color_text_normal);
							break;
					}

					if (node_ptr->time)
						gr_print_timestamp(Hud_mission_log_list_coords[gr_screen.res][0], Hud_mission_log_list_coords[gr_screen.res][1] + y, node_ptr->time);

					x = Hud_mission_log_list2_coords[gr_screen.res][0] + node_ptr->x;
					gr_printf(x, Hud_mission_log_list_coords[gr_screen.res][1] + y, "%s", node_ptr->text);
					if (node_ptr->underline_width)
						gr_line(x, Hud_mission_log_list_coords[gr_screen.res][1] + y + font_height - 1, x + node_ptr->underline_width, Hud_mission_log_list_coords[gr_screen.res][1] + y + font_height - 1);

					if ((node_ptr->source == HUD_SOURCE_FAILED) || (node_ptr->source == HUD_SOURCE_SATISFIED)) {
						// draw goal icon
						if (node_ptr->source == HUD_SOURCE_FAILED)
							gr_set_color_fast(&Color_bright_red);
						else
							gr_set_color_fast(&Color_bright_green);

						i = Hud_mission_log_list_coords[gr_screen.res][1] + y + font_height / 2 - 1;
						gr_circle(Hud_mission_log_list2_coords[gr_screen.res][0] - 6, i, 5);

						gr_set_color_fast(&Color_bright);
						gr_line(Hud_mission_log_list2_coords[gr_screen.res][0] - 10, i, Hud_mission_log_list2_coords[gr_screen.res][0] - 8, i);
						gr_line(Hud_mission_log_list2_coords[gr_screen.res][0] - 6, i - 4, Hud_mission_log_list2_coords[gr_screen.res][0] - 6, i - 2);
						gr_line(Hud_mission_log_list2_coords[gr_screen.res][0] - 4, i, Hud_mission_log_list2_coords[gr_screen.res][0] - 2, i);
						gr_line(Hud_mission_log_list2_coords[gr_screen.res][0] - 6, i + 2, Hud_mission_log_list2_coords[gr_screen.res][0] - 6, i + 4);
					}

					y += font_height + node_ptr->y;
					node_ptr = GET_NEXT(node_ptr);
					if (y + font_height > Hud_mission_log_list_coords[gr_screen.res][3])
						break;
				}
			}
		}
	}

	gr_set_color_fast(&Color_text_heading);
	gr_print_timestamp(Hud_mission_log_time_coords[gr_screen.res][0], Hud_mission_log_time_coords[gr_screen.res][1] - font_height, (int) (f2fl(Missiontime) * 1000));
	gr_string(Hud_mission_log_time2_coords[gr_screen.res][0], Hud_mission_log_time_coords[gr_screen.res][1] - font_height, XSTR( "Current time", 289));
	gr_flip();
}
Esempio n. 25
0
int parse_array_request(
    
  char       *request, 
  tlist_head *tl)

  {
  char                *temp_str;
  int                  num_tokens;
  char               **tokens;
  int                  i;
  int                  j;
  int                  num_elements;
  int                  start;
  int                  end;
  int                  num_bad_tokens;
  int                  searching;
  array_request_node  *rn;
  array_request_node  *rn2;

  if ((request == NULL) || 
      (request[0] == '\0') || 
      (tl == NULL))
    {
    return 1; /* return "bad_token_count" as greater than 0 so caller knows there are problems */
    }
  temp_str = strdup(request);
  num_tokens = array_request_token_count(request);
  num_bad_tokens = 0;

  tokens = (char**)calloc(num_tokens, sizeof(char *));
  j = num_tokens - 1;
  /* start from back and scan backwards setting pointers to tokens and changing ',' to '\0' */

  for (i = strlen(temp_str) - 1; i >= 0; i--)
    {
    if (temp_str[i] == ',')
      {
      tokens[j--] = &temp_str[i+1];
      temp_str[i] = '\0';
      }
    else if (i == 0)
      {
      tokens[0] = temp_str;
      }
    }

  for (i = 0; i < num_tokens; i++)
    {
    num_elements = array_request_parse_token(tokens[i], &start, &end);

    if (num_elements == 0)
      {
      num_bad_tokens++;
      }
    else
      {
      rn = (array_request_node*)calloc(1, sizeof(array_request_node));
      rn->start = start;
      rn->end = end;
      CLEAR_LINK(rn->request_tokens_link);

      rn2 = (array_request_node *)GET_NEXT(*tl);
      searching = TRUE;

      while (searching)
        {

        if (rn2 == NULL)
          {
          append_link(tl, &rn->request_tokens_link, (void*)rn);
          searching = FALSE;
          }
        else if (rn->start < rn2->start)
          {
          insert_link(&rn2->request_tokens_link, &rn->request_tokens_link, (void*)rn,
                      LINK_INSET_BEFORE);
          searching = FALSE;
          }
        else
          {
          rn2 = (array_request_node *)GET_NEXT(rn2->request_tokens_link);
          }

        }

      rn2 = (array_request_node *)GET_PRIOR(rn->request_tokens_link);

      if (rn2 != NULL && rn2->end >= rn->start)
        {
        num_bad_tokens++;
        }

      rn2 = (array_request_node *)GET_NEXT(rn->request_tokens_link);

      if (rn2 != NULL && rn2->start <= rn->end)
        {
        num_bad_tokens++;
        }

      }
    }

  free(tokens);

  free(temp_str);

  return num_bad_tokens;
  } /* END parse_array_request() */
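
A short sketch of how the resulting token list is meant to be consumed. parse_array_request() returns the number of malformed or overlapping tokens, so any value greater than zero is treated as a bad range; the range string below is illustrative.

/* hypothetical caller: parse "1-5,10,15-20" and walk the sorted, non-overlapping ranges */
void walk_array_range_sketch(void)

  {
  char                range[] = "1-5,10,15-20";
  tlist_head          tl;
  array_request_node *rn;
  array_request_node *to_free;

  CLEAR_HEAD(tl);

  if (parse_array_request(range, &tl) > 0)
    return;  /* bad or overlapping tokens - leave the list alone */

  for (rn = (array_request_node *)GET_NEXT(tl); rn != NULL; )
    {
    /* rn->start .. rn->end is one contiguous slice of the array */

    to_free = rn;
    rn = (array_request_node *)GET_NEXT(rn->request_tokens_link);
    free(to_free);
    }
  }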
Esempio n. 26
0
// Try and find a new locking point
void hud_lock_get_new_lock_pos(object *target_objp)
{
	ship			*target_shipp=NULL;
	int			lock_in_range=0;
	float			best_lock_dot=-1.0f, lock_dot=-1.0f;
	ship_subsys	*ss;
	vec3d		subsys_world_pos, vec_to_lock;
	ship_weapon *swp;
	weapon_info *wip;

	if ( target_objp->type == OBJ_SHIP ) {
		target_shipp = &Ships[target_objp->instance];
	}

	swp = &Player_ship->weapons;
	wip = &Weapon_info[swp->secondary_bank_weapons[swp->current_secondary_bank]];

	// if a large ship, lock to pos closest to center and within range
	if ( (target_shipp) && (Ship_info[target_shipp->ship_info_index].flags & (SIF_BIG_SHIP|SIF_HUGE_SHIP)) &&
		 !(wip->wi_flags & WIF_HOMING_JAVELIN) ) {
		// check all the subsystems and the center of the ship
		
		// assume best lock pos is the center of the ship
		lock_world_pos = target_objp->pos;
		Player->locking_on_center=1;
		Player->locking_subsys=NULL;
		Player->locking_subsys_parent=-1;
		lock_in_range = hud_lock_world_pos_in_range(&lock_world_pos, &vec_to_lock);
		vm_vec_normalize(&vec_to_lock);
		if ( lock_in_range ) {
			best_lock_dot=vm_vec_dot(&Player_obj->orient.vec.fvec, &vec_to_lock);
		} 
		// take center if reasonable dot
		if ( best_lock_dot > 0.95 ) {
			return;
		}

		// iterate through subsystems to see if we can get a better choice
		ss = GET_FIRST(&target_shipp->subsys_list);
		while ( ss != END_OF_LIST( &target_shipp->subsys_list ) ) {

			// get world pos of subsystem
			get_subsystem_world_pos(target_objp, ss, &subsys_world_pos);

			if ( hud_lock_world_pos_in_range(&subsys_world_pos, &vec_to_lock) ) {
				vm_vec_normalize(&vec_to_lock);
				lock_dot=vm_vec_dot(&Player_obj->orient.vec.fvec, &vec_to_lock);
				if ( lock_dot > best_lock_dot ) {
					best_lock_dot=lock_dot;
					Player->locking_on_center=0;
					Player->locking_subsys=ss;
					Player->locking_subsys_parent=Player_ai->target_objnum;
					lock_world_pos = subsys_world_pos;
				}
			}
			ss = GET_NEXT( ss );
		}
	} else if ( (target_shipp) && (wip->wi_flags & WIF_HOMING_JAVELIN)) {
		Player->locking_subsys = ship_get_closest_subsys_in_sight(target_shipp, SUBSYSTEM_ENGINE, &Player_obj->pos);
		if (Player->locking_subsys != NULL) {
			get_subsystem_world_pos(target_objp, Player->locking_subsys, &lock_world_pos);
			Player->locking_on_center=0;
			Player->locking_subsys_parent=Player_ai->target_objnum;
		} else {
			hud_lock_reset();
			return;
		}
	} else {
		// if small ship (or weapon), just go for the center
		lock_world_pos = target_objp->pos;
		Player->locking_on_center=1;
		Player->locking_subsys=NULL;
		Player->locking_subsys_parent=-1;
	}
}
Esempio n. 27
0
static char *resc_to_string(

  job       *pjob,      /* I (optional - if specified, report total job resources) */
  int        aindex,    /* I which pbs_attribute to convert */
  char      *buf,       /* O the buffer into which to convert */
  int        buflen)    /* I the length of the above buffer */

  {
  int            need;
  svrattrl      *patlist;
  tlist_head     svlist;
  pbs_attribute *pattr;

  int            isfirst = 1;

  CLEAR_HEAD(svlist);

  *buf = '\0';

  pattr = &pjob->ji_wattr[aindex];

  /* pack the list of resources into svlist */

  if (aindex == JOB_ATR_resource)
    {
    if (encode_resc(pattr, &svlist, (char *)"x", NULL, ATR_ENCODE_CLIENT, ATR_DFLAG_ACCESS) <= 0)
      {
      return(buf);
      }
    }
  else if (aindex == JOB_ATR_resc_used)
    {
    encode_used(pjob, ATR_DFLAG_RDACC, NULL, &svlist);
    }
  else
    {
    return(buf);
    }

  /* unpack svlist into a comma-delimited string */

  patlist = (svrattrl *)GET_NEXT(svlist);

  while (patlist != NULL)
    {
    need = strlen(patlist->al_resc) + strlen(patlist->al_value) + 3;

    if (need >= buflen)
      {
      patlist = (svrattrl *)GET_NEXT(patlist->al_link);

      continue;
      }

    if (LOGLEVEL >= 7)
      {
      fprintf(stderr, "Epilog:  %s=%s\n",
              patlist->al_resc,
              patlist->al_value);
      }

    if (isfirst == 1)
      {
      isfirst = 0;
      }
    else
      {
      strcat(buf, ",");
      buflen--;
      }

    strcat(buf, patlist->al_resc);

    strcat(buf, "=");
    strcat(buf, patlist->al_value);

    buflen -= need;

    patlist = (svrattrl *)GET_NEXT(patlist->al_link);
    }  /* END while (patlist != NULL) */

  free_attrlist(&svlist);

  return(buf);
  }  /* END resc_to_string() */
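
A hedged sketch of the intended use: hand resc_to_string() a job and a fixed buffer and get back a comma-delimited resource string. The buffer size, the example output, and the epilog-style logging are illustrative.

/* hypothetical caller: report the resources a job has consumed */
void log_used_resources_sketch(

  job *pjob)  /* I */

  {
  char resc_buf[1024];

  /* e.g. "cput=00:01:12,mem=3452kb,walltime=00:02:05" (exact format depends on encode_used) */
  resc_to_string(pjob, JOB_ATR_resc_used, resc_buf, (int)sizeof(resc_buf));

  if (resc_buf[0] != '\0')
    fprintf(stderr, "resources_used: %s\n", resc_buf);
  }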
Esempio n. 28
0
// create complete priority-sorted escort list for all active ships
// escorts - array of escort info
// num_escorts - number of escort requests in the field of active ships
//	  This will be culled to MAX_ESCORTS, selecting the top set from escorts
void hud_create_complete_escort_list(escort_info *escorts, int *num_escorts)
{
	ship_obj *so;
	object *objp;	

	// start with none on list
	*num_escorts = 0;

	int idx;

	// multiplayer dogfight
	if(MULTI_DOGFIGHT){
		for(idx=0; idx<MAX_PLAYERS; idx++){
			// break out of the loop when we have reached our max
			if ( *num_escorts == MAX_COMPLETE_ESCORT_LIST ) {
				mprintf(("exceeded max ships in big escort list\n"));
				break;
			}		

			// is this a valid player			
			if(MULTI_CONNECTED(Net_players[idx]) && !MULTI_OBSERVER(Net_players[idx]) && !MULTI_STANDALONE(Net_players[idx])){
				// add the ship
				escorts[*num_escorts].objnum = -1;
				escorts[*num_escorts].obj_signature = -1;
				escorts[*num_escorts].priority = -1;
				escorts[*num_escorts].np_id = Net_players[idx].player_id;
				escorts[*num_escorts].escort_hit_timer = 0;
				escorts[*num_escorts].escort_hit_next_flash = 0;
				escorts[*num_escorts].escort_show_bright = false;
				(*num_escorts)++;
			}
		}
	}
	// all others 
	else {
		for ( so = GET_FIRST(&Ship_obj_list); so != END_OF_LIST(&Ship_obj_list); so = GET_NEXT(so) ) {
			Assert( so->objnum >= 0 && so->objnum < MAX_OBJECTS);
			if((so->objnum < 0) || (so->objnum >= MAX_OBJECTS)){
				continue;
			}
			objp = &Objects[so->objnum];
			Assert( objp->type == OBJ_SHIP );
			if(objp->type != OBJ_SHIP){
				continue;
			}

			// break out of the loop when we have reached our max
			if ( *num_escorts == MAX_COMPLETE_ESCORT_LIST ) {
				mprintf(("exceeded max ships in big escort list\n"));
				break;
			}		
			
			// only process ships that might be on the list
			if ( !(Ships[objp->instance].flags & SF_ESCORT) ){
				continue;
			}

			// only process ships that can be seen by sensors
			if ( (Ships[objp->instance].flags & SF_HIDDEN_FROM_SENSORS) ){
				continue;
			}

			// don't process most stealth ships
			if ( (Ships[objp->instance].flags2 & SF2_STEALTH) )
			{
				if ( Ships[objp->instance].team == Player_ship->team )
				{
					// friendly stealths are only not seen when explicitly specified
					if ( Ships[objp->instance].flags2 & SF2_FRIENDLY_STEALTH_INVIS )
					{
						continue;
					}
				}
				// non-friendly stealths are never seen
				else
				{
					continue;
				}
			}

			// don't process objects that should be dead
			if ( objp->flags & OF_SHOULD_BE_DEAD ) {
				continue;
			}

			// add the ship
			escorts[*num_escorts].objnum = so->objnum;
			escorts[*num_escorts].obj_signature = objp->signature;
			escorts[*num_escorts].priority = Ships[objp->instance].escort_priority;
			escorts[*num_escorts].np_id = -1;
			escorts[*num_escorts].escort_hit_timer = 0;
			escorts[*num_escorts].escort_hit_next_flash = 0;
			escorts[*num_escorts].escort_show_bright = false;
			(*num_escorts)++;			
		}
	}
}
Esempio n. 29
0
/**
 * Simulate a single shockwave.  If the shockwave radius exceeds outer_radius, then
 * delete the shockwave.
 *
 * @param shockwave_objp	object pointer that points to shockwave object
 * @param frametime			time to simulate shockwave
 */
void shockwave_move(object *shockwave_objp, float frametime)
{
    shockwave	*sw;
    object		*objp;
    float			blast,damage;
    int			i;

    Assert(shockwave_objp->type == OBJ_SHOCKWAVE);
    Assert(shockwave_objp->instance  >= 0 && shockwave_objp->instance < MAX_SHOCKWAVES);
    sw = &Shockwaves[shockwave_objp->instance];

    // if the shockwave has a delay on it
    if(sw->delay_stamp != -1) {
        if(timestamp_elapsed(sw->delay_stamp)) {
            sw->delay_stamp = -1;
        } else {
            return;
        }
    }

    sw->time_elapsed += frametime;

    shockwave_set_framenum(shockwave_objp->instance);

    sw->radius += (frametime * sw->speed);
    if ( sw->radius > sw->outer_radius ) {
        sw->radius = sw->outer_radius;
        shockwave_objp->flags |= OF_SHOULD_BE_DEAD;
        return;
    }

    // blast ships and asteroids
    // And (some) weapons
    for ( objp = GET_FIRST(&obj_used_list); objp !=END_OF_LIST(&obj_used_list); objp = GET_NEXT(objp) ) {
        if ( (objp->type != OBJ_SHIP) && (objp->type != OBJ_ASTEROID) && (objp->type != OBJ_WEAPON)) {
            continue;
        }

        if ( objp->type == OBJ_WEAPON ) {
            // only apply to missiles with hitpoints
            weapon_info* wip = &Weapon_info[Weapons[objp->instance].weapon_info_index];
            if (wip->weapon_hitpoints <= 0 || !(wip->wi_flags2 & WIF2_TAKES_SHOCKWAVE_DAMAGE))
                continue;
            if (sw->weapon_info_index >= 0) {
                if (Weapon_info[sw->weapon_info_index].wi_flags2 & WIF2_CIWS) {
                    continue;
                }
            }
        }


        if ( objp->type == OBJ_SHIP ) {
            // don't blast navbuoys
            if ( ship_get_SIF(objp->instance) & SIF_NAVBUOY ) {
                continue;
            }
        }

        // only apply damage to a ship once from a shockwave
        for ( i = 0; i < sw->num_objs_hit; i++ ) {
            if ( objp->signature == sw->obj_sig_hitlist[i] ) {
                break;
            }
        }

        if ( i < sw->num_objs_hit ) {
            continue;
        }

        if ( weapon_area_calc_damage(objp, &sw->pos, sw->inner_radius, sw->outer_radius, sw->blast, sw->damage, &blast, &damage, sw->radius) == -1 ) {
            continue;
        }

        // okay, we have damage applied, record the object signature so we don't repeatedly apply damage
        Assert(sw->num_objs_hit < SW_MAX_OBJS_HIT);
        if ( sw->num_objs_hit >= SW_MAX_OBJS_HIT) {
            sw->num_objs_hit--;
        }

        weapon_info* wip = NULL;

        switch(objp->type) {
        case OBJ_SHIP:
            sw->obj_sig_hitlist[sw->num_objs_hit++] = objp->signature;
            // If we're doing an AoE Electronics shockwave, do the electronics stuff. -MageKing17
            if ( (sw->weapon_info_index >= 0) && (Weapon_info[sw->weapon_info_index].wi_flags3 & WIF3_AOE_ELECTRONICS) && !(objp->flags & OF_INVULNERABLE) ) {
                weapon_do_electronics_effect(objp, &sw->pos, sw->weapon_info_index);
            }
            ship_apply_global_damage(objp, shockwave_objp, &sw->pos, damage );
            weapon_area_apply_blast(NULL, objp, &sw->pos, blast, 1);
            break;
        case OBJ_ASTEROID:
            asteroid_hit(objp, NULL, NULL, damage);
            break;
        case OBJ_WEAPON:
            wip = &Weapon_info[Weapons[objp->instance].weapon_info_index];
            if (wip->armor_type_idx >= 0)
                damage = Armor_types[wip->armor_type_idx].GetDamage(damage, shockwave_get_damage_type_idx(shockwave_objp->instance),1.0f);

            objp->hull_strength -= damage;
            if (objp->hull_strength < 0.0f) {
                Weapons[objp->instance].lifeleft = 0.01f;
                Weapons[objp->instance].weapon_flags |= WF_DESTROYED_BY_WEAPON;
            }
            break;
        default:
            Int3();
            break;
        }

        // If this shockwave hit the player, play shockwave impact sound
        if ( objp == Player_obj ) {
            float full_damage, vol_scale;
            if (sw->weapon_info_index >= 0) {
                full_damage = Weapon_info[sw->weapon_info_index].damage;
            } else {
                full_damage = sw->damage;
            }
            if (full_damage != 0.0f) {
                vol_scale = MAX(0.4f, damage/full_damage);
            } else {
                vol_scale = 1.0f;
            }
            snd_play( &Snds[SND_SHOCKWAVE_IMPACT], 0.0f, vol_scale );
        }

    }	// end for
}
Esempio n. 30
0
int send_request_to_remote_server(
    
  int            conn,
  batch_request *request,
  bool           close_handle)

  {
  struct attropl   *patrl;

  struct svrattrl  *psvratl;
  int               rc = PBSE_NONE;
  int               tmp_rc = PBSE_NONE;
  int               sock = 0;
  char              log_buf[LOCAL_LOG_BUF_SIZE];
  struct tcp_chan  *chan = NULL;
    
  pthread_mutex_lock(connection[conn].ch_mutex);
  sock = connection[conn].ch_socket;
  pthread_mutex_unlock(connection[conn].ch_mutex);
  
  request->rq_conn = sock;
  
  if ((chan = DIS_tcp_setup(sock)) == NULL)
    {
    log_err(PBSE_MEM_MALLOC, __func__,
      "Could not allocate memory for socket buffer");
    close_conn(sock, FALSE);
    return(PBSE_MEM_MALLOC);
    }

  /* the request is bound to another server, encode/send the request */
  switch (request->rq_type)
    {
    case PBS_BATCH_DeleteJob:
 
      rc = PBSD_mgr_put(
             conn,
             PBS_BATCH_DeleteJob,
             MGR_CMD_DELETE,
             MGR_OBJ_JOB,
             request->rq_ind.rq_delete.rq_objname,
             NULL,
             NULL);

      break;

    case PBS_BATCH_HoldJob:

      attrl_fixlink(&request->rq_ind.rq_hold.rq_orig.rq_attr);

      psvratl = (struct svrattrl *)GET_NEXT(request->rq_ind.rq_hold.rq_orig.rq_attr);

      patrl = &psvratl->al_atopl;

      rc = PBSD_mgr_put(
             conn,
             PBS_BATCH_HoldJob,
             MGR_CMD_SET,
             MGR_OBJ_JOB,
             request->rq_ind.rq_hold.rq_orig.rq_objname,
             patrl,
             NULL);

      break;

    case PBS_BATCH_CheckpointJob:

      rc = PBSD_mgr_put(
             conn,
             PBS_BATCH_CheckpointJob,
             MGR_CMD_SET,
             MGR_OBJ_JOB,
             request->rq_ind.rq_hold.rq_orig.rq_objname,
             NULL,
             NULL);

      break;

    case PBS_BATCH_GpuCtrl:

      rc = PBSD_gpu_put(
             conn,
             request->rq_ind.rq_gpuctrl.rq_momnode,
             request->rq_ind.rq_gpuctrl.rq_gpuid,
             request->rq_ind.rq_gpuctrl.rq_gpumode,
             request->rq_ind.rq_gpuctrl.rq_reset_perm,
             request->rq_ind.rq_gpuctrl.rq_reset_vol,
             NULL);

      break;

    case PBS_BATCH_MessJob:

      rc = PBSD_msg_put(
             conn,
             request->rq_ind.rq_message.rq_jid,
             request->rq_ind.rq_message.rq_file,
             request->rq_ind.rq_message.rq_text,
             NULL);

      break;

    case PBS_BATCH_ModifyJob:

    case PBS_BATCH_AsyModifyJob:

      attrl_fixlink(&request->rq_ind.rq_modify.rq_attr);

      patrl = (struct attropl *) & ((struct svrattrl *)GET_NEXT(
                                      request->rq_ind.rq_modify.rq_attr))->al_atopl;

      rc = PBSD_mgr_put(
             conn,
             request->rq_type,
             MGR_CMD_SET,
             MGR_OBJ_JOB,
             request->rq_ind.rq_modify.rq_objname,
             patrl,
             NULL);

      break;

    case PBS_BATCH_Rerun:

      if ((rc = encode_DIS_ReqHdr(chan, PBS_BATCH_Rerun, msg_daemonname)))
        break;

      if ((rc = encode_DIS_JobId(chan, request->rq_ind.rq_rerun)))
        break;

      if ((rc = encode_DIS_ReqExtend(chan, 0)))
        break;

      rc = DIS_tcp_wflush(chan);

      break;

    case PBS_BATCH_RegistDep:

      if ((rc = encode_DIS_ReqHdr(chan, PBS_BATCH_RegistDep, msg_daemonname)))
        break;

      if ((rc = encode_DIS_Register(chan, request)))
        break;

      if ((rc = encode_DIS_ReqExtend(chan, 0)))
        break;

      rc = DIS_tcp_wflush(chan);

      break;

    case PBS_BATCH_AsySignalJob:

    case PBS_BATCH_SignalJob:

      rc = PBSD_sig_put(
             conn,
             (char *)request->rq_ind.rq_signal.rq_jid,
             (char *)request->rq_ind.rq_signal.rq_signame,
             (char *)request->rq_extra);

      break;

    case PBS_BATCH_StatusJob:

      rc = PBSD_status_put(
             conn,
             PBS_BATCH_StatusJob,
             request->rq_ind.rq_status.rq_id,
             NULL,
             NULL);

      break;

    case PBS_BATCH_TrackJob:

      if ((rc = encode_DIS_ReqHdr(chan, PBS_BATCH_TrackJob, msg_daemonname)))
        break;

      if ((rc = encode_DIS_TrackJob(chan, request)))
        break;

      if ((rc = encode_DIS_ReqExtend(chan, 0)))
        break;

      rc = DIS_tcp_wflush(chan);

      break;

    case PBS_BATCH_ReturnFiles:

      if ((rc = encode_DIS_ReqHdr(chan, PBS_BATCH_ReturnFiles, msg_daemonname)))
        break;

      if ((rc = encode_DIS_ReturnFiles(chan, request)))
        break;

      if ((rc = encode_DIS_ReqExtend(chan, 0)))
        break;

      rc = DIS_tcp_wflush(chan);

      break;

    case PBS_BATCH_CopyFiles:

      if ((rc = encode_DIS_ReqHdr(chan, PBS_BATCH_CopyFiles, msg_daemonname)))
        break;

      if ((rc = encode_DIS_CopyFiles(chan, request)))
        break;

      if ((rc = encode_DIS_ReqExtend(chan, 0)))
        break;

      rc = DIS_tcp_wflush(chan);

      break;

    case PBS_BATCH_DelFiles:

      if ((rc = encode_DIS_ReqHdr(chan, PBS_BATCH_DelFiles, msg_daemonname)))
        break;

      if ((rc = encode_DIS_CopyFiles(chan, request)))
        break;

      if ((rc = encode_DIS_ReqExtend(chan, 0)))
        break;

      rc = DIS_tcp_wflush(chan);

      break;

    case PBS_BATCH_DeleteReservation:

      if ((rc = encode_DIS_ReqHdr(chan, PBS_BATCH_DeleteReservation, msg_daemonname)))
        break;

      if ((rc = encode_DIS_ReqExtend(chan, request->rq_extend)))
        break;

      rc = DIS_tcp_wflush(chan);
      
      break;

    case PBS_BATCH_ChangePowerState:

      if ((rc = encode_DIS_ReqHdr(chan, PBS_BATCH_ChangePowerState, msg_daemonname)))
        break;

      if ((rc = encode_DIS_PowerState(chan, request->rq_ind.rq_powerstate)))
        break;

      if ((rc = encode_DIS_ReqExtend(chan, request->rq_extend)))
        break;

      rc = DIS_tcp_wflush(chan);

      break;


    default:

      sprintf(log_buf, msg_issuebad, request->rq_type);

      log_err(-1, __func__, log_buf);

      rc = -1;

      break;
    }  /* END switch (request->rq_type) */

  if ((tmp_rc = DIS_reply_read(chan, &request->rq_reply)) != PBSE_NONE)
    {
    sprintf(log_buf, "DIS_reply_read failed: %d", tmp_rc);
    log_record(PBSEVENT_JOB, PBS_EVENTCLASS_JOB, __func__, log_buf);
    request->rq_reply.brp_code = tmp_rc;
    request->rq_reply.brp_choice = BATCH_REPLY_CHOICE_NULL;
    }

  DIS_tcp_cleanup(chan);

  if (close_handle == true)
    svr_disconnect(conn);

  return(rc);
  } /* END send_request_to_remote_server() */