int reply_send_async(struct batch_request *request) { int sfds = request->rq_conn; /* socket */ // only thread client responses if (svr_conn[sfds].cn_active != FromClientDIS) return reply_send(request); /* determine where the reply should go, remote or local */ if (sfds == PBS_LOCAL_CONNECTION) // default to synchronous version return reply_send(request); else if (sfds >= 0) { int rc = dis_reply_write_async(sfds, &request->rq_reply); if ((request->rq_type != PBS_BATCH_AsyModifyJob) || (request->rq_noreply == TRUE)) { free_br(request); } return rc; } /* Otherwise, the reply is to be sent to a remote client */ return 0; }
/*
 * reply_text - return a text-bearing reply to the client and send it.
 *
 * Any partially-built reply already attached to the request is freed
 * first.  If text is NULL or empty, a text-less (CHOICE_NULL) reply is
 * sent instead.
 *
 * FIX: the strdup() result is now checked; on allocation failure the
 * reply falls back to the text-less form instead of sending a
 * BATCH_REPLY_CHOICE_Text reply whose brp_str is NULL.
 *
 * @param preq  request whose rq_reply is filled in and sent
 * @param code  brp_code to return to the client
 * @param text  message text (copied); may be NULL or ""
 */
void reply_text(

  batch_request *preq,
  int            code,
  const char    *text) /* I */

  {
  if (preq->rq_reply.brp_choice != BATCH_REPLY_CHOICE_NULL)
    {
    /* in case another reply was being built up, clean it out */
    reply_free(&preq->rq_reply);
    }

  preq->rq_reply.brp_code    = code;
  preq->rq_reply.brp_auxcode = 0;
  preq->rq_reply.brp_choice  = BATCH_REPLY_CHOICE_NULL;

  if ((text != NULL) && (text[0] != '\0'))
    {
    char *copy = strdup(text);

    if (copy != NULL)  /* on OOM keep the CHOICE_NULL reply set above */
      {
      preq->rq_reply.brp_choice             = BATCH_REPLY_CHOICE_Text;
      preq->rq_reply.brp_un.brp_txt.brp_str = copy;
      preq->rq_reply.brp_un.brp_txt.brp_txtlen = strlen(copy);
      }
    }

  reply_send(preq);

  return;
  }  /* END reply_text() */
void reply_ack(struct batch_request *preq) { preq->rq_reply.brp_code = PBSE_NONE; preq->rq_reply.brp_auxcode = 0; preq->rq_reply.brp_choice = BATCH_REPLY_CHOICE_NULL; reply_send(preq); }
void req_stat_que(struct batch_request *preq) { char *name; pbs_queue *pque; struct batch_reply *preply; int rc = 0; int type = 0; /* * first, validate the name of the requested object, either * a queue, or null for all queues */ name = preq->rq_ind.rq_status.rq_id; if ((*name == '\0') || (*name =='@')) type = 1; else { pque = find_queuebyname(name); #ifdef NAS /* localmod 075 */ if (pque == NULL) pque = find_resvqueuebyname(name); #endif /* localmod 075 */ if (pque == NULL) { req_reject(PBSE_UNKQUE, 0, preq); return; } } preply = &preq->rq_reply; preply->brp_choice = BATCH_REPLY_CHOICE_Status; CLEAR_HEAD(preply->brp_un.brp_status); if (type == 0) { /* get status of the one named queue */ rc = status_que(pque, preq, &preply->brp_un.brp_status); } else { /* get status of queues */ pque = (pbs_queue *)GET_NEXT(svr_queues); while (pque) { rc = status_que(pque, preq, &preply->brp_un.brp_status); if (rc != 0) { if (rc == PBSE_PERM) rc = 0; else break; } pque = (pbs_queue *)GET_NEXT(pque->qu_link); } } if (rc) { (void)reply_free(preply); req_reject(rc, bad, preq); } else { (void)reply_send(preq); } }
void req_stat_resv(struct batch_request * preq) { char *name; struct batch_reply *preply; resc_resv *presv = NULL; int rc = 0; int type = 0; /* * first, validate the name sent in the request. * This is either the ID of a specific reservation * or a '\0' or "@..." for all reservations. */ name = preq->rq_ind.rq_status.rq_id; if ((*name == '\0') || (*name =='@')) type = 1; else { presv = find_resv(name); if (presv == NULL) { req_reject(PBSE_UNKRESVID, 0, preq); return; } } preply = &preq->rq_reply; preply->brp_choice = BATCH_REPLY_CHOICE_Status; CLEAR_HEAD(preply->brp_un.brp_status); if (type == 0) { /* get status of the specifically named reservation */ rc = status_resv(presv, preq, &preply->brp_un.brp_status); } else { /* get status of all the reservations */ presv = (resc_resv *)GET_NEXT(svr_allresvs); while (presv) { rc = status_resv(presv, preq, &preply->brp_un.brp_status); if (rc == PBSE_PERM) rc = 0; if (rc) break; presv = (resc_resv *)GET_NEXT(presv->ri_allresvs); } } if (rc == 0) (void)reply_send(preq); else req_reject(rc, bad, preq); }
/*
 * reply_jobid - build and send a reply carrying only a job id.
 *
 * FIX: the original used strncpy(), which does not NUL-terminate the
 * destination when jobid is PBS_MAXSVRJOBID bytes or longer; the later
 * reply encoding would then read past the buffer.  snprintf()
 * guarantees termination (and matches the other variant of this
 * function in the tree).
 *
 * @param preq  request whose rq_reply is filled in and sent
 * @param jobid job id copied (and truncated if necessary) into brp_jid
 * @param which batch-reply choice code stored in brp_choice
 * @return return value of reply_send()
 */
int reply_jobid(

  struct batch_request *preq,
  char                 *jobid,
  int                   which)

  {
  preq->rq_reply.brp_code    = 0;
  preq->rq_reply.brp_auxcode = 0;
  preq->rq_reply.brp_choice  = which;

  snprintf(preq->rq_reply.brp_un.brp_jid,
           sizeof(preq->rq_reply.brp_un.brp_jid),
           "%s",
           jobid);

  return(reply_send(preq));
  }  /* END reply_jobid() */
/*
 * reply_jobid - build and send a reply carrying only a job id.
 *
 * @param preq  request whose rq_reply is filled in and sent
 * @param jobid job id copied into brp_jid; snprintf truncates to the
 *              buffer size and always NUL-terminates
 * @param which batch-reply choice code stored in brp_choice
 *              (presumably a BATCH_REPLY_CHOICE_* value -- confirm)
 * @return return value of reply_send()
 */
int reply_jobid(

  struct batch_request *preq,
  char                 *jobid,
  int                   which)

  {
  preq->rq_reply.brp_code    = 0;
  preq->rq_reply.brp_auxcode = 0;
  preq->rq_reply.brp_choice  = which;

  /* bounded copy: guaranteed terminated, silently truncated if long */
  snprintf(preq->rq_reply.brp_un.brp_jid,
           sizeof(preq->rq_reply.brp_un.brp_jid),
           "%s",
           jobid);

  return(reply_send(preq));
  }  /* END reply_jobid() */
/*
 * req_stat_svr - service a Status Server request: refresh the derived
 * server counters, build a single-object status reply for the server,
 * and send it (or reject on a bad attribute / allocation failure).
 */
void req_stat_svr(struct batch_request *preq)
  {
  svrattrl           *pal;
  struct batch_reply *preply;
  struct brp_status  *pstat;

  /* update count and state counts from sv_numjobs and sv_jobstates */
  server.sv_attr[(int)SRV_ATR_TotalJobs].at_val.at_long =
    server.sv_qs.sv_numjobs;
  server.sv_attr[(int)SRV_ATR_TotalJobs].at_flags |=
    ATR_VFLAG_SET|ATR_VFLAG_MODCACHE;
  update_state_ct(&server.sv_attr[(int)SRV_ATR_JobsByState],
                  server.sv_jobstates,
                  server.sv_jobstbuf);
  update_license_ct(&server.sv_attr[(int)SRV_ATR_license_count],
                    server.sv_license_ct_buf);

  /* allocate a reply structure and a status sub-structure */
  preply = &preq->rq_reply;
  preply->brp_choice = BATCH_REPLY_CHOICE_Status;
  CLEAR_HEAD(preply->brp_un.brp_status);

  pstat = (struct brp_status *)malloc(sizeof(struct brp_status));

  if (pstat == NULL)
    {
    reply_free(preply);
    req_reject(PBSE_SYSTEM, 0, preq);
    return;
    }

  CLEAR_LINK(pstat->brp_stlink);

  /* NOTE(review): unbounded strcpy assumes server_name always fits in
   * brp_objname -- confirm the buffer size against pbs_server_name */
  (void)strcpy(pstat->brp_objname, server_name);

  pstat->brp_objtype = MGR_OBJ_SERVER;
  CLEAR_HEAD(pstat->brp_attr);
  /* pstat is now owned by the reply list; reply_free releases it */
  append_link(&preply->brp_un.brp_status, &pstat->brp_stlink, pstat);

  /* add attributes to the status reply; 'bad' is a variable declared
   * elsewhere in this file that receives the ordinal of the first
   * rejected attribute, reset before use */
  bad = 0;
  pal = (svrattrl *)GET_NEXT(preq->rq_ind.rq_status.rq_attr);

  if (status_attrib(pal, svr_attr_def, server.sv_attr, SRV_ATR_LAST,
                    preq->rq_perm, &pstat->brp_attr, &bad))
    reply_badattr(PBSE_NOATTR, bad, pal, preq);
  else
    (void)reply_send(preq);
  }
/*
 * post_py_spawn_req - work-task callback run when the MOM's reply to a
 * python-spawn request arrives: close the MOM connection (non-RPP
 * case), log the spawn status, and relay the reply to the original
 * client.
 *
 * @param pwt  completed work task; wt_parm1 carries the original
 *             batch_request, wt_event the MOM connection handle
 */
static void post_py_spawn_req(struct work_task *pwt)
  {
  struct batch_request *preq;
  char tmp_buf[128] = "";

  if (pwt->wt_aux2 != 1) /* not rpp */
    svr_disconnect(pwt->wt_event); /* close connection to MOM */

  preq = pwt->wt_parm1;
  preq->rq_conn = preq->rq_orgconn; /* restore socket to client */

  /* brp_code == 0 means the spawn itself succeeded; brp_auxcode then
   * carries the spawned process's exit value (bounded "%d" -> tmp_buf
   * cannot overflow its 128 bytes) */
  if (preq->rq_reply.brp_code == 0)
    sprintf(tmp_buf, " exit value %d", preq->rq_reply.brp_auxcode);

  /* NOTE(review): assumes the global log_buffer is large enough for
   * this short, bounded message -- confirm its declared size */
  sprintf(log_buffer, "Python spawn status %d%s",
          preq->rq_reply.brp_code, tmp_buf);

  log_event(PBSEVENT_JOB, PBS_EVENTCLASS_JOB, LOG_INFO,
            preq->rq_ind.rq_py_spawn.rq_jid, log_buffer);

  reply_send(preq);
  }
void req_stat_sched(struct batch_request *preq) { svrattrl *pal; struct batch_reply *preply; int rc = 0; pbs_sched *psched; /* allocate a reply structure and a status sub-structure */ preply = &preq->rq_reply; preply->brp_choice = BATCH_REPLY_CHOICE_Status; CLEAR_HEAD(preply->brp_un.brp_status); for (psched = (pbs_sched *) GET_NEXT(svr_allscheds); (psched != NULL); psched = (pbs_sched *) GET_NEXT(psched->sc_link) ) { rc = status_sched(psched, preq, &preply->brp_un.brp_status); if (rc != 0) { break; } } if (!rc) { (void)reply_send(preq); } else { if (rc != PBSE_NOATTR) req_reject(rc, 0, preq); else { pal = (svrattrl *)GET_NEXT(preq->rq_ind. rq_status.rq_attr); reply_badattr(rc, bad, pal, preq); } } }
void req_stat_que( struct batch_request *preq) /* ptr to the decoded request */ { char *name; pbs_queue *pque = NULL; struct batch_reply *preply; int rc = 0; int type = 0; /* * first, validate the name of the requested object, either * a queue, or null for all queues */ name = preq->rq_ind.rq_status.rq_id; if ((*name == '\0') || (*name == '@')) { type = 1; } else { pque = find_queuebyname(name); if (pque == NULL) { req_reject(PBSE_UNKQUE, 0, preq, NULL, "cannot locate queue"); return; } } preply = &preq->rq_reply; preply->brp_choice = BATCH_REPLY_CHOICE_Status; CLEAR_HEAD(preply->brp_un.brp_status); if (type == 0) { /* get status of the named queue */ rc = status_que(pque, preq, &preply->brp_un.brp_status); } else { /* get status of all queues */ pque = (pbs_queue *)GET_NEXT(svr_queues); while (pque != NULL) { rc = status_que(pque, preq, &preply->brp_un.brp_status); if (rc != 0) { if (rc != PBSE_PERM) break; rc = 0; } pque = (pbs_queue *)GET_NEXT(pque->qu_link); } } if (rc != 0) { reply_free(preply); req_reject(rc, bad, preq, NULL, "status_queue failed"); } else { reply_send(preq); } return; } /* END req_stat_que() */
/*
 * req_stat_svr - service a Status Server request: refresh derived
 * server counters (total jobs, jobs-by-state, network counters),
 * build a single-object status reply for the server, and send it.
 */
void req_stat_svr(

  struct batch_request *preq) /* ptr to the decoded request */

  {
  svrattrl           *pal;
  struct batch_reply *preply;
  struct brp_status  *pstat;
  int                *nc;
  /* static: the NetCounter attribute stores a pointer to this buffer,
   * so it must outlive the call */
  static char         nc_buf[128];

  /* update count and state counts from sv_numjobs and sv_jobstates */

  server.sv_attr[(int)SRV_ATR_TotalJobs].at_val.at_long =
    server.sv_qs.sv_numjobs;
  server.sv_attr[(int)SRV_ATR_TotalJobs].at_flags |= ATR_VFLAG_SET;

  update_state_ct(
    &server.sv_attr[(int)SRV_ATR_JobsByState],
    server.sv_jobstates,
    server.sv_jobstbuf);

  /* NOTE(review): assumes netcounter_get() returns at least three
   * ints -- confirm against its definition */
  nc = netcounter_get();

  sprintf(nc_buf, "%d %d %d", *nc, *(nc + 1), *(nc + 2));

  server.sv_attr[(int)SRV_ATR_NetCounter].at_val.at_str = nc_buf;
  server.sv_attr[(int)SRV_ATR_NetCounter].at_flags |= ATR_VFLAG_SET;

  /* allocate a reply structure and a status sub-structure */

  preply = &preq->rq_reply;
  preply->brp_choice = BATCH_REPLY_CHOICE_Status;
  CLEAR_HEAD(preply->brp_un.brp_status);

  pstat = (struct brp_status *)malloc(sizeof(struct brp_status));

  if (pstat == NULL)
    {
    reply_free(preply);

    req_reject(PBSE_SYSTEM, 0, preq, NULL, NULL);

    return;
    }

  CLEAR_LINK(pstat->brp_stlink);

  /* NOTE(review): unbounded strcpy assumes server_name fits in
   * brp_objname -- confirm */
  strcpy(pstat->brp_objname, server_name);

  pstat->brp_objtype = MGR_OBJ_SERVER;

  CLEAR_HEAD(pstat->brp_attr);

  /* ownership of pstat passes to the reply's status list */
  append_link(&preply->brp_un.brp_status, &pstat->brp_stlink, pstat);

  /* add attributes to the status reply; 'bad' (declared elsewhere in
   * this file) receives the ordinal of the first rejected attribute */

  bad = 0;

  pal = (svrattrl *)GET_NEXT(preq->rq_ind.rq_status.rq_attr);

  if (status_attrib(
        pal,
        svr_attr_def,
        server.sv_attr,
        SRV_ATR_LAST,
        preq->rq_perm,
        &pstat->brp_attr,
        &bad,
        1))    /* IsOwner == TRUE */
    {
    reply_badattr(PBSE_NOATTR, bad, pal, preq);
    }
  else
    {
    reply_send(preq);
    }

  return;
  }  /* END req_stat_svr() */
/*
 * free_br - release a batch_request and all data hanging off it.
 *
 * Two cases:
 *  - the request is a duplicate made for an array subjob
 *    (rq_parentbr set): only the basic structure is freed here, and
 *    the parent's reference count is decremented; when it reaches
 *    zero the parent's reply is finally sent.
 *  - a normal request: per-request-type payloads (attribute lists,
 *    credential blobs, file data, ...) are freed before the structure
 *    itself.
 */
void free_br(struct batch_request *preq)
  {
  delete_link(&preq->rq_link);
  reply_free(&preq->rq_reply);

  if (preq->rq_parentbr)
    {
    /*
     * have a parent who has the original info, so we cannot
     * free any data malloc-ed outside of the basic structure;
     * decrement the reference count in the parent and when it
     * goes to zero, reply_send() it
     */
    if (preq->rq_parentbr->rq_refct > 0)
      {
      if (--preq->rq_parentbr->rq_refct == 0)
        reply_send(preq->rq_parentbr);
      }

    if (preq->rppcmd_msgid)
      free(preq->rppcmd_msgid);

    (void)free(preq);

    return;
    }

  /*
   * IMPORTANT - free any data that is malloc-ed outside of the
   * basic batch_request structure below here so it is not freed
   * when a copy of the structure (for a Array subjob) is freed
   */
  if (preq->rq_extend)
    (void)free(preq->rq_extend);

  /* free the per-type payload stored in the rq_ind union */
  switch (preq->rq_type)
    {
    case PBS_BATCH_QueueJob:
      free_attrlist(&preq->rq_ind.rq_queuejob.rq_attr);
      break;

    case PBS_BATCH_JobCred:
      if (preq->rq_ind.rq_jobcred.rq_data)
        (void)free(preq->rq_ind.rq_jobcred.rq_data);
      break;

    case PBS_BATCH_UserCred:
      if (preq->rq_ind.rq_usercred.rq_data)
        (void)free(preq->rq_ind.rq_usercred.rq_data);
      break;

    case PBS_BATCH_GSS_Context:
      if (preq->rq_ind.rq_gssdata.rq_data)
        free(preq->rq_ind.rq_gssdata.rq_data);
      break;

    case PBS_BATCH_jobscript:
      if (preq->rq_ind.rq_jobfile.rq_data)
        (void)free(preq->rq_ind.rq_jobfile.rq_data);
      break;

    case PBS_BATCH_CopyHookFile:
      if (preq->rq_ind.rq_hookfile.rq_data)
        (void)free(preq->rq_ind.rq_hookfile.rq_data);
      break;

    case PBS_BATCH_HoldJob:
      freebr_manage(&preq->rq_ind.rq_hold.rq_orig);
      break;

    case PBS_BATCH_MessJob:
      if (preq->rq_ind.rq_message.rq_text)
        (void)free(preq->rq_ind.rq_message.rq_text);
      break;

    case PBS_BATCH_RelnodesJob:
      if (preq->rq_ind.rq_relnodes.rq_node_list)
        (void)free(preq->rq_ind.rq_relnodes.rq_node_list);
      break;

    case PBS_BATCH_PySpawn:
      arrayfree(preq->rq_ind.rq_py_spawn.rq_argv);
      arrayfree(preq->rq_ind.rq_py_spawn.rq_envp);
      break;

    case PBS_BATCH_ModifyJob:
    case PBS_BATCH_ModifyResv:
      freebr_manage(&preq->rq_ind.rq_modify);
      break;

    case PBS_BATCH_RunJob:
    case PBS_BATCH_AsyrunJob:
    case PBS_BATCH_StageIn:
    case PBS_BATCH_ConfirmResv:
      if (preq->rq_ind.rq_run.rq_destin)
        (void)free(preq->rq_ind.rq_run.rq_destin);
      break;

    case PBS_BATCH_StatusJob:
    case PBS_BATCH_StatusQue:
    case PBS_BATCH_StatusNode:
    case PBS_BATCH_StatusSvr:
    case PBS_BATCH_StatusSched:
    case PBS_BATCH_StatusHook:
    case PBS_BATCH_StatusRsc:
    case PBS_BATCH_StatusResv:
      if (preq->rq_ind.rq_status.rq_id)
        free(preq->rq_ind.rq_status.rq_id);
      free_attrlist(&preq->rq_ind.rq_status.rq_attr);
      break;

    case PBS_BATCH_CopyFiles:
    case PBS_BATCH_DelFiles:
      freebr_cpyfile(&preq->rq_ind.rq_cpyfile);
      break;

    case PBS_BATCH_CopyFiles_Cred:
    case PBS_BATCH_DelFiles_Cred:
      freebr_cpyfile_cred(&preq->rq_ind.rq_cpyfile_cred);
      break;

    case PBS_BATCH_MvJobFile:
      if (preq->rq_ind.rq_jobfile.rq_data)
        free(preq->rq_ind.rq_jobfile.rq_data);
      break;

#ifndef PBS_MOM  /* Server Only */

    case PBS_BATCH_SubmitResv:
      /* reservation submission reuses the queuejob attribute list */
      free_attrlist(&preq->rq_ind.rq_queuejob.rq_attr);
      break;

    case PBS_BATCH_Manager:
      freebr_manage(&preq->rq_ind.rq_manager);
      break;

    case PBS_BATCH_ReleaseJob:
      freebr_manage(&preq->rq_ind.rq_release);
      break;

    case PBS_BATCH_Rescq:
    case PBS_BATCH_ReserveResc:
    case PBS_BATCH_ReleaseResc:
      free_rescrq(&preq->rq_ind.rq_rescq);
      break;

    case PBS_BATCH_DefSchReply:
      free(preq->rq_ind.rq_defrpy.rq_id);
      free(preq->rq_ind.rq_defrpy.rq_txt);
      break;

    case PBS_BATCH_SelectJobs:
    case PBS_BATCH_SelStat:
      free_attrlist(&preq->rq_ind.rq_select.rq_selattr);
      free_attrlist(&preq->rq_ind.rq_select.rq_rtnattr);
      break;

#endif /* PBS_MOM */
    }

  if (preq->rppcmd_msgid)
    free(preq->rppcmd_msgid);

  (void)free(preq);
  }
static void req_stat_job_step2( struct stat_cntl *cntl) /* I/O (freed on return) */ { svrattrl *pal; job *pjob = NULL; struct batch_request *preq; struct batch_reply *preply; int rc = 0; enum TJobStatTypeEnum type; pbs_queue *pque = NULL; int exec_only = 0; int IsTruncated = 0; long DTime; /* delta time - only report full attribute list if J->MTime > DTime */ static svrattrl *dpal = NULL; int job_array_index = 0; job_array *pa = NULL; preq = cntl->sc_origrq; type = (enum TJobStatTypeEnum)cntl->sc_type; preply = &preq->rq_reply; /* See pbs_server_attributes(1B) for details on "poll_jobs" behaviour */ /* NOTE: If IsTruncated is true, should walk all queues and walk jobs in each queue until max_reported is reached (NYI) */ if (dpal == NULL) { /* build 'delta' attribute list */ svrattrl *tpal; tlist_head dalist; int aindex; int atrlist[] = { JOB_ATR_jobname, JOB_ATR_resc_used, JOB_ATR_LAST }; CLEAR_LINK(dalist); for (aindex = 0;atrlist[aindex] != JOB_ATR_LAST;aindex++) { if ((tpal = attrlist_create("", "", 23)) == NULL) { return; } tpal->al_valln = atrlist[aindex]; if (dpal == NULL) dpal = tpal; append_link(&dalist, &tpal->al_link, tpal); } } /* END if (dpal == NULL) */ if (type == tjstArray) { pa = get_array(preq->rq_ind.rq_status.rq_id); } if (!server.sv_attr[(int)SRV_ATR_PollJobs].at_val.at_long) { /* polljobs not set - indicates we may need to obtain fresh data from MOM */ if (cntl->sc_jobid[0] == '\0') pjob = NULL; else pjob = find_job(cntl->sc_jobid); while (1) { if (pjob == NULL) { /* start from the first job */ if (type == tjstJob) { pjob = find_job(preq->rq_ind.rq_status.rq_id); } else if (type == tjstQueue) { pjob = (job *)GET_NEXT(cntl->sc_pque->qu_jobs); } else if (type == tjstArray) { job_array_index = 0; /* increment job_array_index until we find a non-null pointer or hit the end */ while (job_array_index < pa->ai_qs.array_size && (pjob = pa->jobs[job_array_index]) == NULL) job_array_index++; } else { if ((type == tjstTruncatedServer) || (type == 
tjstTruncatedQueue)) IsTruncated = TRUE; pjob = (job *)GET_NEXT(svr_alljobs); } } /* END if (pjob == NULL) */ else { /* get next job */ if (type == tjstJob) break; if (type == tjstQueue) pjob = (job *)GET_NEXT(pjob->ji_jobque); else pjob = (job *)GET_NEXT(pjob->ji_alljobs); if (type == tjstArray) { pjob = NULL; /* increment job_array_index until we find a non-null pointer or hit the end */ while (++job_array_index < pa->ai_qs.array_size && (pjob = pa->jobs[job_array_index]) == NULL) ; } } if (pjob == NULL) break; /* PBS_RESTAT_JOB defaults to 30 seconds */ if ((pjob->ji_qs.ji_substate == JOB_SUBSTATE_RUNNING) && ((time_now - pjob->ji_momstat) > JobStatRate)) { /* go to MOM for status */ strcpy(cntl->sc_jobid, pjob->ji_qs.ji_jobid); if ((rc = stat_to_mom(pjob, cntl)) == PBSE_SYSTEM) { break; } if (rc != 0) { rc = 0; continue; } return; /* will pick up after mom replies */ } } /* END while(1) */ if (cntl->sc_conn >= 0) svr_disconnect(cntl->sc_conn); /* close connection to MOM */ if (rc != 0) { free(cntl); reply_free(preply); req_reject(rc, 0, preq, NULL, "cannot get update from mom"); return; } } /* END if (!server.sv_attr[(int)SRV_ATR_PollJobs].at_val.at_long) */ /* * now ready for part 3, building the status reply, * loop through again */ if (type == tjstSummarizeArraysQueue || type == tjstSummarizeArraysServer) { update_array_statuses(); } if (type == tjstJob) pjob = find_job(preq->rq_ind.rq_status.rq_id); else if (type == tjstQueue) pjob = (job *)GET_NEXT(cntl->sc_pque->qu_jobs); else if (type == tjstSummarizeArraysQueue) pjob = (job *)GET_NEXT(cntl->sc_pque->qu_jobs_array_sum); else if (type == tjstSummarizeArraysServer) pjob = (job *)GET_NEXT(svr_jobs_array_sum); else if (type == tjstArray) { job_array_index = 0; pjob = NULL; /* increment job_array_index until we find a non-null pointer or hit the end */ while (job_array_index < pa->ai_qs.array_size && (pjob = pa->jobs[job_array_index]) == NULL) job_array_index++; } else pjob = (job *)GET_NEXT(svr_alljobs); 
DTime = 0; if (preq->rq_extend != NULL) { char *ptr; /* FORMAT: { EXECQONLY | DELTA:<EPOCHTIME> } */ if (strstr(preq->rq_extend, EXECQUEONLY)) exec_only = 1; ptr = strstr(preq->rq_extend, "DELTA:"); if (ptr != NULL) { ptr += strlen("delta:"); DTime = strtol(ptr, NULL, 10); } } free(cntl); if ((type == tjstTruncatedServer) || (type == tjstTruncatedQueue)) { long sentJobCounter; long qjcounter; long qmaxreport; /* loop through all queues */ for (pque = (pbs_queue *)GET_NEXT(svr_queues); pque != NULL; pque = (pbs_queue *)GET_NEXT(pque->qu_link)) { qjcounter = 0; if ((exec_only == 1) && (pque->qu_qs.qu_type != QTYPE_Execution)) { /* ignore routing queues */ continue; } if (((pque->qu_attr[QA_ATR_MaxReport].at_flags & ATR_VFLAG_SET) != 0) && (pque->qu_attr[QA_ATR_MaxReport].at_val.at_long >= 0)) { qmaxreport = pque->qu_attr[QA_ATR_MaxReport].at_val.at_long; } else { qmaxreport = TMAX_JOB; } if (LOGLEVEL >= 5) { sprintf(log_buffer,"giving scheduler up to %ld idle jobs in queue %s\n", qmaxreport, pque->qu_qs.qu_name); log_event( PBSEVENT_SYSTEM, PBS_EVENTCLASS_QUEUE, pque->qu_qs.qu_name, log_buffer); } sentJobCounter = 0; /* loop through jobs in queue */ for (pjob = (job *)GET_NEXT(pque->qu_jobs); pjob != NULL; pjob = (job *)GET_NEXT(pjob->ji_jobque)) { if ((qjcounter >= qmaxreport) && (pjob->ji_qs.ji_state == JOB_STATE_QUEUED)) { /* max_report of queued jobs reached for queue */ continue; } pal = (svrattrl *)GET_NEXT(preq->rq_ind.rq_status.rq_attr); rc = status_job( pjob, preq, (pjob->ji_wattr[(int)JOB_ATR_mtime].at_val.at_long >= DTime) ? 
pal : dpal, &preply->brp_un.brp_status, &bad); if ((rc != 0) && (rc != PBSE_PERM)) { req_reject(rc, bad, preq, NULL, NULL); return; } sentJobCounter++; if (pjob->ji_qs.ji_state == JOB_STATE_QUEUED) qjcounter++; } /* END for (pjob) */ if (LOGLEVEL >= 5) { sprintf(log_buffer,"sent scheduler %ld total jobs for queue %s\n", sentJobCounter, pque->qu_qs.qu_name); log_event( PBSEVENT_SYSTEM, PBS_EVENTCLASS_QUEUE, pque->qu_qs.qu_name, log_buffer); } } /* END for (pque) */ reply_send(preq); return; } /* END if ((type == tjstTruncatedServer) || ...) */ while (pjob != NULL) { /* go ahead and build the status reply for this job */ if (exec_only) { pque = find_queuebyname(pjob->ji_qs.ji_queue); if (pque->qu_qs.qu_type != QTYPE_Execution) goto nextjob; } pal = (svrattrl *)GET_NEXT(preq->rq_ind.rq_status.rq_attr); rc = status_job( pjob, preq, pal, &preply->brp_un.brp_status, &bad); if ((rc != 0) && (rc != PBSE_PERM)) { req_reject(rc, bad, preq, NULL, NULL); return; } /* get next job */ nextjob: if (type == tjstJob) break; if (type == tjstQueue) pjob = (job *)GET_NEXT(pjob->ji_jobque); else if (type == tjstSummarizeArraysQueue) pjob = (job *)GET_NEXT(pjob->ji_jobque_array_sum); else if (type == tjstSummarizeArraysServer) pjob = (job *)GET_NEXT(pjob->ji_jobs_array_sum); else if (type == tjstArray) { pjob = NULL; /* increment job_array_index until we find a non-null pointer or hit the end */ while (++job_array_index < pa->ai_qs.array_size && (pjob = pa->jobs[job_array_index]) == NULL) ; } else pjob = (job *)GET_NEXT(pjob->ji_alljobs); rc = 0; } /* END while (pjob != NULL) */ reply_send(preq); if (LOGLEVEL >= 7) { log_event(PBSEVENT_SYSTEM, PBS_EVENTCLASS_JOB, "req_statjob", "Successfully returned the status of queued jobs\n"); } return; } /* END req_stat_job_step2() */
/*
 * req_rerunjob - service a Rerun Job request.
 *
 * Dispatches on the kind of job id supplied:
 *  - a regular job: handed straight to req_rerunjob2()
 *  - a single array subjob: rerun only if that subjob is RUNNING
 *  - the array job itself: every RUNNING subjob is rerun via a
 *    duplicated request; non-running subjobs are reset to QUEUED
 *  - a subjob range "[x-y:z,...]": validated first (at least one
 *    subjob in the range must be RUNNING), then each running subjob
 *    gets a duplicated request.
 *
 * For the multi-subjob cases preq->rq_refct protects the request:
 * it is incremented before fan-out and the reply is sent only when
 * the count returns to zero (either here, or by the last duplicated
 * request when it completes -- presumably via free_br; confirm).
 *
 * Requires manager or operator privilege.
 */
void req_rerunjob(struct batch_request *preq)
  {
  int   anygood = 0;   /* count of RUNNING subjobs seen in the range */
  int   i;
  int   j;
  char *jid;
  int   jt;            /* job type */
  int   offset;
  char *pc;
  job  *pjob;
  job  *parent;
  char *range;
  char *vrange;
  int   x, y, z;       /* range start, end, step */

  jid = preq->rq_ind.rq_signal.rq_jid;
  parent = chk_job_request(jid, preq, &jt);

  if (parent == (job *)0)
    return;  /* note, req_reject already called */

  /* rerun requires manager or operator privilege */
  if ((preq->rq_perm & (ATR_DFLAG_MGWR | ATR_DFLAG_OPWR)) == 0)
    {
    req_reject(PBSE_PERM, 0, preq);
    return;
    }

  if (jt == IS_ARRAY_NO)
    {
    /* just a regular job, pass it on down the line and be done */
    req_rerunjob2(preq, parent);
    return;
    }
  else if (jt == IS_ARRAY_Single)
    {
    /* single subjob, if running can signal */

    offset = subjob_index_to_offset(parent, get_index_from_jid(jid));

    if (offset == -1)
      {
      req_reject(PBSE_UNKJOBID, 0, preq);
      return;
      }

    i = get_subjob_state(parent, offset);

    if (i == -1)
      {
      req_reject(PBSE_IVALREQ, 0, preq);
      return;
      }
    else if (i == JOB_STATE_RUNNING)
      {
      pjob = find_job(jid); /* get ptr to the subjob */

      if (pjob)
        {
        req_rerunjob2(preq, pjob);
        }
      else
        {
        req_reject(PBSE_BADSTATE, 0, preq);
        return;
        }
      }
    else
      {
      req_reject(PBSE_BADSTATE, 0, preq);
      return;
      }

    return;
    }
  else if (jt == IS_ARRAY_ArrayJob)
    {
    /* The Array Job itself ... */

    /* the array must have begun running before a rerun makes sense */
    if (parent->ji_qs.ji_state != JOB_STATE_BEGUN)
      {
      req_reject(PBSE_BADSTATE, 0, preq);
      return;
      }

    /* for each subjob that is running, call req_rerunjob2 */

    ++preq->rq_refct;  /* protect the request/reply struct */

    /* Setting deleted subjobs count to 0,
     * since all the deleted subjobs will be moved to Q state */
    parent->ji_ajtrk->tkm_dsubjsct = 0;

    for (i = 0; i < parent->ji_ajtrk->tkm_ct; i++)
      {
      if (get_subjob_state(parent, i) == JOB_STATE_RUNNING)
        {
        pjob = find_job(mk_subjob_id(parent, i));

        if (pjob)
          {
          dup_br_for_subjob(preq, pjob, req_rerunjob2);
          }
        }
      else
        {
        /* not running: simply mark the subjob queued again */
        set_subjob_tblstate(parent, i, JOB_STATE_QUEUED);
        }
      }

    /* if not waiting on any running subjobs, can reply; else */
    /* it is taken care of when last running subjob responds  */
    if (--preq->rq_refct == 0)
      reply_send(preq);

    return;
    }

  /* what's left to handle is a range of subjobs, foreach subjob */
  /* if running, all req_rerunjob2                               */

  range = get_index_from_jid(jid);

  if (range == NULL)
    {
    req_reject(PBSE_IVALREQ, 0, preq);
    return;
    }

  /* first check that all in the subrange are in fact running */

  vrange = range;

  while (1)
    {
    /* parse_subjob_index returns -1 on error, 1 when the range
     * string is exhausted, 0 with x/y/z filled in otherwise */
    if ((i = parse_subjob_index(vrange, &pc, &x, &y, &z, &j)) == -1)
      {
      req_reject(PBSE_IVALREQ, 0, preq);
      return;
      }
    else if (i == 1)
      break;

    while (x <= y)
      {
      i = numindex_to_offset(parent, x);

      if (i >= 0)
        {
        if (get_subjob_state(parent, i) == JOB_STATE_RUNNING)
          anygood++;
        }

      x += z;
      }

    vrange = pc;
    }

  if (anygood == 0)
    {
    /* nothing in the range was running */
    req_reject(PBSE_BADSTATE, 0, preq);
    return;
    }

  /* now do the deed */

  ++preq->rq_refct;  /* protect the request/reply struct */

  while (1)
    {
    if ((i = parse_subjob_index(range, &pc, &x, &y, &z, &j)) == -1)
      {
      req_reject(PBSE_IVALREQ, 0, preq);
      break;
      }
    else if (i == 1)
      break;

    while (x <= y)
      {
      i = numindex_to_offset(parent, x);

      if (i < 0)
        {
        x += z;
        continue;
        }

      if (get_subjob_state(parent, i) == JOB_STATE_RUNNING)
        {
        pjob = find_job(mk_subjob_id(parent, i));

        if (pjob)
          {
          dup_br_for_subjob(preq, pjob, req_rerunjob2);
          }
        }

      x += z;
      }

    range = pc;
    }

  /* if not waiting on any running subjobs, can reply; else */
  /* it is taken care of when last running subjob responds  */
  if (--preq->rq_refct == 0)
    reply_send(preq);

  return;
  }
/*
 * req_stat_job - service a Status Job request.
 *
 * The target id selects the scope:
 *  - starts with a digit:         one or more job ids (comma list)
 *  - starts with a letter:        every job in the named queue
 *  - empty or starts with '@':    every job on the server
 *
 * Extension flags in rq_extend: 't' also statuses the subjobs of array
 * jobs; 'x' includes history jobs (rejected with PBSE_JOBHISTNOTSET
 * unless the server has job history enabled).
 */
void req_stat_job(struct batch_request *preq)
  {
  int                 at_least_one_success = 0;
  int                 dosubjobs = 0;
  int                 dohistjobs = 0;
  char               *name;
  job                *pjob = NULL;
  pbs_queue          *pque = NULL;
  struct batch_reply *preply;
  int                 rc = 0;
  int                 type = 0;   /* 1 = job list, 2 = queue, 3 = server */
  char               *pnxtjid = NULL;

  /* check for any extended flag in the batch request. 't' for
   * the sub jobs. If 'x' is there, then check if the server is
   * configured for history job info. If not set or set to FALSE,
   * return with PBSE_JOBHISTNOTSET error. Otherwise select history
   * jobs.
   */
  if (preq->rq_extend)
    {
    if (strchr(preq->rq_extend, (int)'t'))
      dosubjobs = 1; /* status sub jobs of an Array Job */

    if (strchr(preq->rq_extend, (int)'x'))
      {
      if (svr_history_enable == 0)
        {
        req_reject(PBSE_JOBHISTNOTSET, 0, preq);
        return;
        }

      dohistjobs = 1; /* status history jobs */
      }
    }

  /*
   * first, validate the name of the requested object, either
   * a job, a queue, or the whole server.
   * type = 1 for a job, Array job, subjob or range of subjobs, or
   *           a comma separated list of the above.
   *        2 for jobs in a queue,
   *        3 for jobs in the server, or
   */
  name = preq->rq_ind.rq_status.rq_id;

  if ( isdigit((int)*name) )
    {
    /* a single job id */
    type = 1;
    rc = PBSE_UNKJOBID;  /* default error if no id in the list resolves */
    }
  else if ( isalpha((int)*name) )
    {
    pque = find_queuebyname(name) /* status jobs in a queue */;
#ifdef NAS /* localmod 075 */
    if (pque == NULL)
      pque = find_resvqueuebyname(name);
#endif /* localmod 075 */
    if (pque)
      type = 2;
    else
      rc = PBSE_UNKQUE;
    }
  else if ((*name == '\0') || (*name == '@'))
    {
    type = 3; /* status all jobs at server */
    }
  else
    rc = PBSE_IVALREQ;

  if (type == 0)
    {
    /* is invalid - an error */
    req_reject(rc, 0, preq);
    return;
    }

  preply = &preq->rq_reply;
  preply->brp_choice = BATCH_REPLY_CHOICE_Status;
  CLEAR_HEAD(preply->brp_un.brp_status);
  rc = PBSE_NONE;

  if (type == 1)
    {
    /*
     * If there is more than one job id, any status for any
     * one job is returned, then no error is given.
     * If a single job id is requested and there is an error
     * the error is returned.
     */
    pnxtjid = name;

    while ((name = parse_comma_string_r(&pnxtjid)) != NULL)
      {
      if ((rc = stat_a_jobidname(preq, name, dohistjobs, dosubjobs)) == PBSE_NONE)
        at_least_one_success = 1;
      }

    if (at_least_one_success == 1)
      reply_send(preq);
    else
      req_reject(rc, 0, preq);  /* rc is the error of the last id tried */

    return;
    }
  else if (type == 2)
    {
    /* every job in the named queue; stop on first hard error */
    pjob = (job *)GET_NEXT(pque->qu_jobs);

    while (pjob && (rc == PBSE_NONE))
      {
      rc = do_stat_of_a_job(preq, pjob, dohistjobs, dosubjobs);

      pjob = (job *)GET_NEXT(pjob->ji_jobque);
      }
    }
  else
    {
    /* every job on the server; stop on first hard error */
    pjob = (job *)GET_NEXT(svr_alljobs);

    while (pjob && (rc == PBSE_NONE))
      {
      rc = do_stat_of_a_job(preq, pjob, dohistjobs, dosubjobs);

      pjob = (job *)GET_NEXT(pjob->ji_alljobs);
      }
    }

  /* PBSE_PERM on an individual job is not an overall failure */
  if (rc && (rc != PBSE_PERM))
    req_reject(rc, bad, preq);
  else
    reply_send(preq);
  }
void req_stat_node(struct batch_request *preq) { char *name; struct batch_reply *preply; svrattrl *pal; struct pbsnode *pnode = NULL; int rc = 0; int type = 0; int i; /* * first, check that the server indeed has a list of nodes * and if it does, validate the name of the requested object-- * either name is that of a spedific node, or name[0] is null/@ * meaning request is for all nodes in the server's jurisdiction */ if (pbsndlist == 0 || svr_totnodes <= 0) { req_reject(PBSE_NONODES, 0, preq); return; } resc_access_perm = preq->rq_perm; name = preq->rq_ind.rq_status.rq_id; if ((*name == '\0') || (*name =='@')) type = 1; else { pnode = find_nodebyname(name); if (pnode == NULL) { req_reject(PBSE_UNKNODE, 0, preq); return; } } preply = &preq->rq_reply; preply->brp_choice = BATCH_REPLY_CHOICE_Status; CLEAR_HEAD(preply->brp_un.brp_status); if (type == 0) { /* get status of the named node */ rc = status_node(pnode, preq, &preply->brp_un.brp_status); } else { /* get status of all nodes */ for (i = 0; i < svr_totnodes; i++) { pnode = pbsndlist[i]; rc = status_node(pnode, preq, &preply->brp_un.brp_status); if (rc) break; } } if (!rc) { (void)reply_send(preq); } else { if (rc != PBSE_UNKNODEATR) req_reject(rc, 0, preq); else { pal = (svrattrl *)GET_NEXT(preq->rq_ind. rq_status.rq_attr); reply_badattr(rc, bad, pal, preq); } } }
/*
 * req_stat_node - service a Status Node request.
 *
 * The target name selects the scope:
 *  - empty or "@":       all nodes
 *  - ":ALL":             all nodes (pseudo-group)
 *  - ":<property>":      all nodes carrying the named property
 *  - anything else:      the single node of that name
 */
void req_stat_node(

  struct batch_request *preq) /* ptr to the decoded request */

  {
  char               *name;
  struct pbsnode     *pnode = NULL;
  struct batch_reply *preply;
  svrattrl           *pal;
  int                 rc = 0;
  int                 type = 0;  /* 0 = one node, 1 = all, 2 = by property */
  int                 i;

  /* stack-local property filter; only read while looping below */
  struct prop props;

  char *id = "req_stat_node";

  /*
   * first, check that the server indeed has a list of nodes
   * and if it does, validate the name of the requested object--
   * either name is that of a specific node, or name[0] is null/@
   * meaning request is for all nodes in the server's jurisdiction
   */
  if (LOGLEVEL >= 6)
    {
    log_record(
      PBSEVENT_SCHED,
      PBS_EVENTCLASS_REQUEST,
      id,
      "entered");
    }

  if ((pbsndmast == NULL) || (svr_totnodes <= 0))
    {
    req_reject(PBSE_NONODES, 0, preq, NULL,
               "node list is empty - check 'server_priv/nodes' file");

    return;
    }

  name = preq->rq_ind.rq_status.rq_id;

  if ((*name == '\0') || (*name == '@'))
    {
    type = 1;
    }
  else if ((*name == ':') && (*(name + 1) != '\0'))
    {
    if (!strcmp(name + 1, "ALL"))
      {
      type = 1;  /* pseudo-group for all nodes */
      }
    else
      {
      type = 2;
      props.name = name + 1;
      props.mark = 1;
      props.next = NULL;
      }
    }
  else
    {
    pnode = find_nodebyname(name);

    if (pnode == NULL)
      {
      req_reject(PBSE_UNKNODE, 0, preq, NULL,
                 "cannot locate specified node");

      return;
      }
    }

  preply = &preq->rq_reply;
  preply->brp_choice = BATCH_REPLY_CHOICE_Status;
  CLEAR_HEAD(preply->brp_un.brp_status);

  if (type == 0)
    {
    /* get status of the named node */

    rc = status_node(pnode, preq, &preply->brp_un.brp_status);
    }
  else
    {
    /* get status of all or several nodes */

    for (i = 0; i < svr_totnodes; i++)
      {
      pnode = pbsndmast[i];

      /* property filter: skip nodes that don't carry the property */
      if ((type == 2) && !hasprop(pnode, &props))
        continue;

      if ((rc = status_node(pnode, preq, &preply->brp_un.brp_status)) != 0)
        break;
      }
    }

  if (!rc)
    {
    /* SUCCESS */

    reply_send(preq);
    }
  else
    {
    if (rc != PBSE_UNKNODEATR)
      {
      req_reject(rc, 0, preq, NULL, NULL);
      }
    else
      {
      pal = (svrattrl *)GET_NEXT(preq->rq_ind.rq_status.rq_attr);

      reply_badattr(rc, bad, pal, preq);
      }
    }

  return;
  }  /* END req_stat_node() */