static List _get_precs(List task_list, bool pgid_plugin, uint64_t cont_id,
                       jag_callbacks_t *callbacks)
{
    List prec_list = list_create(destroy_jag_prec);
    jag_prec_t *prec = NULL;
    struct procsinfo proc;
    int pid = 0;
    int i;

    if (!pgid_plugin) {
        pid_t *pids = NULL;
        int npids = 0;
        /* get only the processes in the proctrack container */
        proctrack_g_get_pids(cont_id, &pids, &npids);
        if (!npids) {
            debug4("no pids in this container %"PRIu64"", cont_id);
            goto finished;
        }
        for (i = 0; i < npids; i++) {
            pid = pids[i];
            if (!getprocs(&proc, sizeof(proc), 0, 0, &pid, 1))
                continue; /* Assume the process went away */
            prec = xmalloc(sizeof(jag_prec_t));
            list_append(prec_list, prec);
            prec->pid = proc.pi_pid;
            prec->ppid = proc.pi_ppid;
            prec->usec = proc.pi_ru.ru_utime.tv_sec +
                proc.pi_ru.ru_utime.tv_usec * 1e-6;
            prec->ssec = proc.pi_ru.ru_stime.tv_sec +
                proc.pi_ru.ru_stime.tv_usec * 1e-6;
            prec->pages = proc.pi_majflt;
            prec->rss = (proc.pi_trss + proc.pi_drss) * pagesize;
            prec->vsize = (proc.pi_tsize / 1024);
            prec->vsize += (proc.pi_dvm * pagesize);
        }
        xfree(pids);
    } else {
        while (getprocs(&proc, sizeof(proc), 0, 0, &pid, 1) == 1) {
            prec = xmalloc(sizeof(jag_prec_t));
            list_append(prec_list, prec);
            prec->pid = proc.pi_pid;
            prec->ppid = proc.pi_ppid;
            prec->usec = proc.pi_ru.ru_utime.tv_sec +
                proc.pi_ru.ru_utime.tv_usec * 1e-6;
            prec->ssec = proc.pi_ru.ru_stime.tv_sec +
                proc.pi_ru.ru_stime.tv_usec * 1e-6;
            prec->pages = proc.pi_majflt;
            prec->rss = (proc.pi_trss + proc.pi_drss) * pagesize;
            prec->vsize = (proc.pi_tsize / 1024);
            prec->vsize += (proc.pi_dvm * pagesize);
        }
    }
finished:
    return prec_list;
}
int main(void)
{
    int max_process = 10;
    struct proc_entry *table = malloc(sizeof(struct proc_entry) * max_process);

    if (table == 0) {
        printf(1, "couldn't allocate mem?!!\n");
        exit();
    }

    max_process = getprocs(max_process, table);

    int i;
    for (i = 0; i < max_process; i++) {
        struct proc_entry *p = &table[i];
        if (p->ppid == -1)  /* no parent (init) */
            printf(1, "Pid: %d State: (%s), Size: %d, Name: %s, Nr Tickets: %d, Runs: %d\n",
                   p->pid, p->state, p->sz, p->name, p->tickets, p->runs);
        else                /* pid, parent pid, state (string), size, name */
            printf(1, "Pid: %d Parent Pid: %d State: (%s), Size: %d, Name: %s, Nr Tickets: %d, Runs: %d\n",
                   p->pid, p->ppid, p->state, p->sz, p->name, p->tickets, p->runs);
    }

    free(table);
    exit();
}
int main(int argc, char *argv[])
{
    int num_procs = getprocs();

    printf(1, "number of procs=%d\n", num_procs);
    exit();
}
int main()
{
    printf(1, "PID=%d\n", getpid());

    int pid = fork();
    switch (pid) {
    case 0: {
        /* child process */
        printf(1, "(%d): I am a child of %d\n", getpid(), getppid());
        break;
    }
    default: {
        /* parent process */
        printf(1, "(%d): I am the parent of %d and a child of %d\n",
               getpid(), pid, getppid());
        switch (fork()) {
        case 0: {
            /* second child */
            printf(1, "(%d): I am a child of %d\n", getpid(), getppid());
            break;
        }
        default: {
            printf(1, "Number of processes: %d\n", getprocs());
            break;
        }
        }
        break;
    }
    }

    while (1)
        ;
    exit();
} // end main()
/////////////////////////////////////////////////////////////////////////////
// NAME     : processdata
// FUNCTION : calls the getprocs() subroutine to get information about
//            all processes.
/////////////////////////////////////////////////////////////////////////////
// ARGUMENTS:
//    cnt: the number of processes in the returned table
// RETURN VALUES:
//    On success, an array of procsinfo structures filled with process
//    table entries is returned.  Otherwise, a null pointer is returned.
/////////////////////////////////////////////////////////////////////////////
struct procsinfo *processdata(int *cnt)
{
    struct procsinfo *proctable = NULL, *rtnp = NULL;
    int count = 1048576, rtncnt, repeat = 1, nextp = 0;

    *cnt = 0;
    while (repeat &&
           (rtncnt = getprocs(rtnp, PROCSIZE, 0, 0, &nextp, count)) > 0) {
        if (!rtnp) {
            /* first call with a NULL buffer: rtncnt reports how many
               entries exist, so size the table accordingly */
            count = rtncnt;
            proctable = (struct procsinfo *) malloc((size_t) PROCSIZE * count);
            if (!proctable)
                return NULL;
            rtnp = proctable;
            nextp = 0;
        } else {
            *cnt += rtncnt;
            if (rtncnt >= count) {
                /* more processes appeared; grow the table */
                proctable = (struct procsinfo *) realloc((void *) proctable,
                                (size_t) (PROCSIZE * (*cnt + count)));
                if (!proctable)
                    return NULL;
                rtnp = proctable + (*cnt);
            } else {
                repeat = 0;
            }
        } // end of if (!rtnp)
    } // end of while
    return proctable;
}
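/*
 * A minimal, hypothetical caller for processdata() above (not part of the
 * original sample): it fetches the table, prints one line per process, and
 * frees the buffer.  PROCSIZE is assumed to be sizeof(struct procsinfo),
 * as in the sample; the function name list_processes() is made up.
 */
#include <stdio.h>
#include <stdlib.h>
#include <procinfo.h>

int list_processes(void)
{
    int i, cnt = 0;
    struct procsinfo *tbl = processdata(&cnt);

    if (!tbl)
        return -1;                       /* allocation or getprocs() failure */
    for (i = 0; i < cnt; i++)
        printf("%-8ld %s\n", (long) tbl[i].pi_pid, tbl[i].pi_comm);
    free(tbl);
    return cnt;
}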
int main(int argc, char *argv[])
{
    int i = getprocs();

    printf(1, "Number of processes running:\t%d\n", i);
    exit();
}
/* Returns the procedure associated with name. */
NODE *procValue(NODE *name)
{
    NODE *result, *parentList;

    result = assoc(name, getprocs(current_object));
    if (result != NIL)
        return getobject(result);

    for (parentList = parent_list(current_object);
         parentList != NIL && result == NIL;
         parentList = cdr(parentList)) {
        result = assoc(name, getprocs(car(parentList)));
    }
    if (result != NIL)
        return getobject(result);

    result = intern(name);
    return procnode__caseobj(result);
}
//xiqi
int sys_getprocs(void)
{
    int n;

    acquire(&tickslock);
    n = getprocs();
    release(&tickslock);
    return n;
}
/* Outputs TRUE if Symbol is the name of a procedure owned by the current
 * object, FALSE otherwise.
 * @params - Symbol
 */
NODE *lmyprocp(NODE *args)
{
    NODE *arg;

    if (current_object == logo_object)
        return lprocedurep(args);   /* return lprocp or just call it? */

    arg = name_arg(args);
    if (NOT_THROWING)
        return torf(assoc(arg, getprocs(current_object)) != NIL);
    return UNBOUND;
}
int main(int argc, char *argv[])
{
    int pid, i;

    for (i = 0; i < 20; ++i) {
        pid = fork();
        if (pid == 0) {
            while (1)
                ;
        }
    }
    /* 20 spinning children + this process + init + sh = 23 */
    assert((int) getprocs() == 23);
    printf(1, "TEST PASSED\n");
    exit();
}
BOOL IsValidProcessEntry(pid_t pid, time_t RegTime)
{
#ifdef PKCS64
    pid_t Index = (pid_t) pid;
#else
    pid_t Index = pid;
#endif
    struct procsinfo64 ProcInfo[1];   /* getprocs wants arrays; I'm just being
                                         anal; I know it's stupid to declare an
                                         array of 1 element */
    struct fdsinfo_2000 FileInfo[1];  /* if you pass a struct fdsinfo, you get
                                         a core dump */
    int Count = 1;
    int Err;

    /* Note that Index is modified by this call; use pid to see what
       process id we're looking for afterwards */
    if (getprocs(&(ProcInfo[0]), sizeof(ProcInfo), NULL, NULL,
                 &Index, Count) != Count) {
        Err = errno;
        if (Err == EINVAL) {
            /* The process was not found */
            DbgLog(DL3, "IsValidProcessEntry: PID %d was not found in the "
                   "process table (getprocs() returned %s)",
                   pid, SysConst(Err));
            return FALSE;
        } else {
            /* some other error occurred */
            DbgLog(DL3, "IsValidProcessEntry: getprocs() returned %s (%d; %#x)",
                   SysConst(Err), Err, Err);
            return FALSE;
        }
    } /* end if getprocs */

    /* Okay, the process exists, now we see if it's really ours */
    if (ProcInfo[0].pi_pid == pid) {
        if (RegTime >= ProcInfo[0].pi_start) {
            return TRUE;
        } else {
            /* ProcInfo[0].pi_start contains the time at which the
               process began */
            DbgLog(DL1, "IsValidProcessEntry: PID %d started at %lld; registered at %ld",
                   pid, ProcInfo[0].pi_start, RegTime);
            DbgLog(DL4, "IsValidProcessEntry: PID Returned %d flags at %#x; state at %#x index %d",
                   ProcInfo[0].pi_pid, ProcInfo[0].pi_flags,
                   ProcInfo[0].pi_state, Index);
        }
    }

    return FALSE;
}
int sh_count_procs(char *procname)
{
    struct procsinfo pinfo;
    char pinfo_name[256];
    pid_t index;
    int count;
    char *sep;

    index = 0;
    count = 0;
    while (getprocs(&pinfo, sizeof(pinfo), NULL, 0, &index, 1) == 1) {
        strlcpy(pinfo_name, pinfo.pi_comm, sizeof(pinfo_name));
        /* the command may carry arguments; compare only the first word */
        sep = strchr(pinfo_name, ' ');
        if (sep != NULL)
            *sep = 0;
        if (strcmp(procname, pinfo_name) == 0)
            count++;
    }
    return count;
}
int main(int argc, char *argv[])
{
    struct uproc p[UPROC];
    int i, size;

    size = getprocs(10, p);
    if (size <= 0) {
        printf(2, "Error: ps failed\n");
        exit();
    }

    printf(1, "pid uid gid ppid state prio sz nm\n");
    for (i = 0; i < size; ++i) {
        /* print the fields in the same order as the header line */
        printf(1, "%d %d %d %d %s %d %d %s\n",
               p[i].pid, p[i].uid, p[i].gid, p[i].ppid,
               p[i].state, p[i].priority, p[i].size, p[i].name);
    }
    exit();
}
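/*
 * The ps program above compiles against a user-visible process record whose
 * definition lives in that xv6 variant's headers.  The sketch below is only a
 * reconstruction guessed from the fields the program touches (pid, uid, gid,
 * ppid, state, priority, size, name); the field widths and the UPROC value
 * are assumptions, not the original header.
 */
#define UPROC 64                /* assumed maximum table size */

struct uproc {
    int pid;                    /* process id */
    int uid;                    /* owning user id */
    int gid;                    /* owning group id */
    int ppid;                   /* parent process id */
    char state[16];             /* human-readable state, e.g. "SLEEPING" */
    int priority;               /* scheduling priority */
    int size;                   /* memory size */
    char name[16];              /* program name */
};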
unsigned int kfi_getPid(const char *proc, unsigned int ppid)
{
    bool error = false;
    unsigned int pid = 0;
    int i, count;
    pid_t idx = 0;
    struct procsinfo pi[MAX_PROCS];

    /* getprocs() advances idx past the entries it returns, so this walks
       the whole process table in MAX_PROCS-sized chunks */
    while (!error &&
           (count = getprocs(pi, sizeof(pi[0]), 0, 0, &idx, MAX_PROCS)) > 0) {
        for (i = 0; i < count && !error; i++) {
            if (pi[i].pi_ppid == ppid &&
                0 == strcmp(pi[i].pi_comm, proc)) {
                if (pid)
                    error = true;   /* more than one match is ambiguous */
                else
                    pid = pi[i].pi_pid;
            }
        }
    }
    return error ? 0 : pid;
}
extern int _aix_get_dmem_info( PAPI_dmem_info_t * d )
{
    /* This function has been reimplemented to conform to current interface.
       It has not been tested.  Nor has it been confirmed for completeness.
       dkt 05-10-06 */
    struct procsinfo pi;
    pid_t mypid = getpid( );
    pid_t pid;
    int found = 0;

    pid = 0;
    while ( 1 ) {
        if ( getprocs( &pi, sizeof ( pi ), 0, 0, &pid, 1 ) != 1 )
            break;
        if ( mypid == pi.pi_pid ) {
            found = 1;
            break;
        }
    }
    if ( !found )
        return ( PAPI_ESYS );

    d->size = pi.pi_size;
    d->resident = pi.pi_drss + pi.pi_trss;
    d->high_water_mark = PAPI_EINVAL;
    d->shared = PAPI_EINVAL;
    d->text = pi.pi_trss;   /* this is a guess */
    d->library = PAPI_EINVAL;
    d->heap = PAPI_EINVAL;
    d->locked = PAPI_EINVAL;
    d->stack = PAPI_EINVAL;
    d->pagesize = getpagesize( );

    return ( PAPI_OK );
}
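/*
 * A small, hypothetical caller for the routine above (not part of PAPI
 * itself): it fills a PAPI_dmem_info_t and prints the fields this function
 * actually sets.  Units are whatever getprocs() reports in pi_size, pi_drss
 * and pi_trss; no conversion is attempted here.
 */
#include <stdio.h>

static void print_dmem_usage(void)
{
    PAPI_dmem_info_t d;

    if (_aix_get_dmem_info(&d) != PAPI_OK) {
        fprintf(stderr, "getprocs() could not find our own pid\n");
        return;
    }
    printf("size     : %lld\n", (long long) d.size);
    printf("resident : %lld\n", (long long) d.resident);
    printf("text     : %lld\n", (long long) d.text);
    printf("pagesize : %lld\n", (long long) d.pagesize);
}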
int sh_count_procs(char *procname)
{
    struct procsinfo pinfo;
    char pinfo_name[256];
    pid_t index;
    int count;
    char *sep;

    index = 0;
    count = 0;
    while (getprocs(&pinfo, sizeof(pinfo), NULL, 0, &index, 1) == 1) {
        strncpy(pinfo_name, pinfo.pi_comm, 256);
        pinfo_name[255] = 0;
        sep = strchr(pinfo_name, ' ');
        if (sep != NULL)
            *sep = 0;
        DEBUGMSGTL(("proc", "aix Comparing wanted %s against %s\n",
                    procname, pinfo_name));
        if (strcmp(procname, pinfo_name) == 0)
            count++;
    }
    return count;
}
void OS_get_table()
{
    struct procsinfo64 *procs = NULL;
    int index = 0;
    int fetched = 0;
    struct timeval now_tval;

    /* allocate memory to hold the procs */
    procs = (struct procsinfo64 *) malloc(sizeof(struct procsinfo64) * PROCS_TO_FETCH);
    if (NULL == procs) {
        ppt_warn("cannot allocate memory in Proc::ProcessTable::OS_get_table!");
        return;
    }

    /* get current time of day */
    gettimeofday(&now_tval, 0);
    now_time = TVALU_TO_SEC(now_tval);

    /* keep on grabbing chunks of processes until getprocs returns a
       smaller block than we asked for */
    while ((fetched = getprocs(procs, sizeof(struct procsinfo64), NULL, 0,
                               &index, PROCS_TO_FETCH)) >= PROCS_TO_FETCH) {
        bless_procs(procs, fetched);
    }

    /* bless the last block of procs, if there is one */
    if (fetched > 0)
        bless_procs(procs, fetched);

    /* release the memory */
    Safefree(procs);
    return;
}
int main(int argc, char *argv[])
{
    printf(1, "%d\n", getprocs());
    exit();
}
rawTime64 pd_thread::getRawCpuTime_sw()
{
    // Returns user+sys time from the user area of the inferior process.
    // Since AIX 4.1 doesn't have a /proc file system, this is slightly
    // more complicated than solaris or the others.
    // It must not stop the inferior process or assume it is stopped.
    // It must be "in sync" with rtinst's DYNINSTgetCPUtime()

    // Idea number one: use getprocs() (which needs to be included anyway
    // because of a use above) to grab the process table info.
    // We probably want pi_ru.ru_utime and pi_ru.ru_stime.

    // int lwp_id: thread ID of desired time.  Ignored for now.
    // int pid:    process ID that we want the time for.

    // int getprocs(struct procsinfo *ProcessBuffer, // array of procsinfos
    //              int ProcessSize,                 // sizeof(procsinfo)
    //              struct fdsinfo *FileBuffer,      // array of fdsinfos
    //              int FileSize,                    // sizeof(...)
    //              pid_t *IndexPointer,             // next PID after call
    //              int Count);                      // how many to retrieve

    // Constant for the number of processes wanted in info
    const unsigned int numProcsWanted = 1;
    struct procsinfo procInfoBuf[numProcsWanted];
    struct fdsinfo fdsInfoBuf[numProcsWanted];
    int numProcsReturned;

    // The pid sent to getprocs() is modified, so make a copy
    pid_t wantedPid = pd_proc->getPid();

    // We really don't need to recalculate the size of the structures
    // every call through here.  The compiler should optimize these
    // to constants.
    const int sizeProcInfo = sizeof(struct procsinfo);
    const int sizeFdsInfo = sizeof(struct fdsinfo);

    numProcsReturned = getprocs(procInfoBuf, sizeProcInfo,
                                fdsInfoBuf, sizeFdsInfo,
                                &wantedPid, numProcsWanted);
    if (numProcsReturned == -1) // We have an error
        perror("Failure in getInferiorCPUtime");

    // Now we have the process table information.  Since there is no
    // description other than the header file, I've included descriptions
    // of the used fields.
    //
    //   struct procsinfo {
    //       // valid when the process is a zombie only
    //       unsigned long pi_utime;  // this process' user time
    //       unsigned long pi_stime;  // this process' system time
    //       // accounting and profiling data
    //       unsigned long pi_start;  // time at which process began
    //       struct rusage pi_ru;     // this process' rusage info
    //       struct rusage pi_cru;    // children's rusage info
    //   };
    //
    // Other things are included, but we don't need 'em here.
    // In addition, the fdsinfo returned is ignored, since we don't need
    // open file descriptor data.

    // This isn't great, since the returned time is in seconds run.  It works
    // (horribly) for now, though.  Multiply it by a million and we'll call
    // it a day.  Back to the drawing board.

    // Get the time (user+system?) in seconds
    rawTime64 result =
        (rawTime64) procInfoBuf[0].pi_ru.ru_utime.tv_sec +  // user time
        (rawTime64) procInfoBuf[0].pi_ru.ru_stime.tv_sec;   // system time
    result *= I64_C(1000000);

    // It looks like the tv_usec fields are actually nanoseconds in this
    // case.  If so, it's undocumented -- but I'm getting numbers like
    // "980000000" which is either 980 _million_ microseconds (i.e. 980 sec)
    // or .98 seconds if the units are nanoseconds.
    // IF STRANGE RESULTS HAPPEN IN THE TIMERS, make sure that usec is
    // actually nanos, not micros.
    rawTime64 nanoseconds =
        (rawTime64) procInfoBuf[0].pi_ru.ru_utime.tv_usec +  // user time
        (rawTime64) procInfoBuf[0].pi_ru.ru_stime.tv_usec;   // system time
    result += (nanoseconds / 1000);

    if (result < sw_previous_) {  // Time ran backwards?
        // When the process exits we often get a final time call.
        // If the result is 0(.0), don't print an error.
        if (result) {
            char errLine[150];
            sprintf(errLine,
                    "process::getRawCpuTime_sw - time going backwards in "
                    "daemon - cur: %lld, prev: %lld\n",
                    result, sw_previous_);
            cerr << errLine;
            pdlogLine(errLine);
        }
        result = sw_previous_;
    } else {
        sw_previous_ = result;
    }

    return result;
}
/* Outputs a list of the names of the procedures owned by (not inherited
 * by) the current object.
 * @params - none
 */
NODE *lmyprocs(NODE *args)
{
    return getprocs(current_object);
}
/* --------------------------------------------------------------------- */
int
netsnmp_arch_swrun_container_load( netsnmp_container *container, u_int flags)
{
    static int              avail = 1024;   /* size hint kept between loads */
    struct procsinfo       *proc_table;
    pid_t                   proc_index = 0;
    int                     nprocs, rc, i;
    char                   *cp1, *cp2;
    netsnmp_swrun_entry    *entry;

    /*
     * Create a buffer for the process table, based on the size of
     * the table the last time we loaded this information.
     * If this isn't big enough, keep increasing the size of the
     * table until we can retrieve the whole thing.
     */
    proc_table = (struct procsinfo *) malloc(avail*(sizeof(struct procsinfo)));
    while ( avail == (nprocs = getprocs(proc_table, sizeof(struct procsinfo),
                                        0, sizeof(struct fdsinfo),
                                        &proc_index, avail))) {
        avail += 1024;
        proc_index = 0;       /* restart the scan with the bigger buffer */
        free( proc_table );
        proc_table = (struct procsinfo *) malloc(avail*(sizeof(struct procsinfo)));
    }

    for ( i=0 ; i<nprocs; i++ ) {
        if (0 == proc_table[i].pi_state)
            continue;   /* Skip unused entries */

        entry = netsnmp_swrun_entry_create(proc_table[i].pi_pid);
        if (NULL == entry)
            continue;   /* error already logged by function */
        rc = CONTAINER_INSERT(container, entry);

        /*
         *  Split pi_comm into two:
         *     argv[0]   is hrSWRunPath
         *     argv[1..] is hrSWRunParameters
         */
        for ( cp1 = proc_table[i].pi_comm; *cp1 && ' ' != *cp1; cp1++ )
            ;                   /* advance to the first space, if any */
        if (' ' == *cp1)
            *cp1++ = '\0';      /* End of argv[0]; cp1 now points at the parameters */
        entry->hrSWRunPath_len = snprintf(entry->hrSWRunPath,
                                   sizeof(entry->hrSWRunPath)-1,
                                          "%s", proc_table[i].pi_comm);
        /*
         * Set hrSWRunName to be the last component of hrSWRunPath
         */
        cp2 = strrchr( entry->hrSWRunPath, '/' );
        if (cp2)
            cp2++;                             /* Find the final component ... */
        else
            cp2 = entry->hrSWRunPath;          /* ... if any */
        entry->hrSWRunName_len = snprintf(entry->hrSWRunName,
                                   sizeof(entry->hrSWRunName)-1,
                                          "%s", cp2);
        entry->hrSWRunParameters_len = snprintf(entry->hrSWRunParameters,
                                         sizeof(entry->hrSWRunParameters)-1,
                                                "%s", cp1);

        entry->hrSWRunType = (SKPROC & proc_table[i].pi_flags)
                              ? 2   /* kernel process */
                              : 4   /*  application   */
                              ;

        switch (proc_table[i].pi_state) {
        case SACTIVE:
        case SRUN:   entry->hrSWRunStatus = HRSWRUNSTATUS_RUNNING;
                     break;
        case SSLEEP: entry->hrSWRunStatus = HRSWRUNSTATUS_RUNNABLE;
                     break;
        case SSTOP:  entry->hrSWRunStatus = HRSWRUNSTATUS_NOTRUNNABLE;
                     break;
        case SIDL:
        case SZOMB:
        default:     entry->hrSWRunStatus = HRSWRUNSTATUS_INVALID;
                     break;
        }

        /* CPU in hundredths of a second: usec / 10000 */
        entry->hrSWRunPerfCPU  = (proc_table[i].pi_ru.ru_utime.tv_sec  * 100);
        entry->hrSWRunPerfCPU += (proc_table[i].pi_ru.ru_utime.tv_usec / 10000);
        entry->hrSWRunPerfCPU += (proc_table[i].pi_ru.ru_stime.tv_sec  * 100);
        entry->hrSWRunPerfCPU += (proc_table[i].pi_ru.ru_stime.tv_usec / 10000);
        entry->hrSWRunPerfMem  =  proc_table[i].pi_size;
        entry->hrSWRunPerfMem *= (getpagesize()/1024);  /* in kB */
    }
    free(proc_table);

    DEBUGMSGTL(("swrun:load:arch"," loaded %d entries\n",
                CONTAINER_SIZE(container)));

    return 0;
}
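/*
 * The same "grow the buffer until the whole table fits" idea, extracted as a
 * stand-alone helper.  This is a minimal sketch, not net-snmp code; the name
 * fetch_proc_table() and the starting size of 1024 entries are arbitrary.
 */
#include <stdlib.h>
#include <sys/types.h>
#include <procinfo.h>

struct procsinfo *fetch_proc_table(int *nprocs)
{
    int avail = 1024;
    struct procsinfo *tbl = NULL;

    for (;;) {
        pid_t index = 0;                 /* restart the scan on every attempt */
        struct procsinfo *p = realloc(tbl, avail * sizeof(*tbl));
        if (!p) {
            free(tbl);
            return NULL;
        }
        tbl = p;
        *nprocs = getprocs(tbl, sizeof(*tbl), NULL, 0, &index, avail);
        if (*nprocs < 0) {               /* getprocs() failed */
            free(tbl);
            return NULL;
        }
        if (*nprocs < avail)             /* the whole table fit in the buffer */
            return tbl;
        avail *= 2;                      /* buffer was filled; try a bigger one */
    }
}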
caddr_t get_process_info(struct system_info *si, struct process_select *sel,
                         int compare_index)
{
    int i, nproc;
    int ptsize_util;
    int active_procs = 0, total_procs = 0;
    struct procsinfo *pp, **p_pref = pref;
    unsigned long pctcpu;
    pid_t procsindex = 0;
    struct proc *p;

    si->procstates = process_states;
    curtime = time(0);

    /* get the procsinfo structures of all running processes */
    nproc = getprocs(p_info, sizeof (struct procsinfo), NULL, 0,
                     &procsindex, nprocs);
    if (nproc < 0) {
        perror("getprocs");
        quit(1);
    }

    /* the swapper has no cmd-line attached */
    strcpy(p_info[0].pi_comm, "swapper");

    /* get proc table */
    ptsize_util = (PROCMASK(p_info[nproc-1].pi_pid)+1) * sizeof(struct proc);
    getkval(proc_offset, (caddr_t)p_proc, ptsize_util, "proc");

    memset(process_states, 0, sizeof process_states);

    /* build a list of pointers to processes to show. walk through the
     * list of procsinfo structures instead of the proc table since the
     * mapping of procsinfo -> proctable is easy, the other way around
     * is cumbersome
     */
    for (pp = p_info, i = 0; i < nproc; pp++, i++) {
        p = &p_proc[PROCMASK(pp->pi_pid)];

        /* AIX marks all runnable processes as ACTIVE.  We want to know
           which processes are sleeping, so check used cpu ticks and adjust
           the status field accordingly */
        if (p->p_stat == SACTIVE && p->p_cpticks == 0)
            p->p_stat = SIDL;

        if (pp->pi_state && (sel->system || ((pp->pi_flags & SKPROC) == 0))) {
            total_procs++;
            process_states[p->p_stat]++;
            if ( (pp->pi_state != SZOMB) &&
                 (sel->idle || p->p_cpticks != 0 || (p->p_stat == SACTIVE)) &&
                 (sel->uid == -1 || pp->pi_uid == (uid_t)sel->uid)) {
                *p_pref++ = pp;
                active_procs++;
            }
        }
    }

    /* the pref array now holds pointers to the procsinfo structures in
     * the p_info array that were selected for display
     */

    /* sort if requested */
    if (si->p_active)
        qsort((char *)pref, active_procs, sizeof (struct procsinfo *),
              proc_compares[compare_index]);

    si->last_pid = -1;   /* no way to figure out last used pid */
    si->p_total = total_procs;
    si->p_active = pref_len = active_procs;

    handle.next_proc = pref;
    handle.remaining = active_procs;

    return((caddr_t)&handle);
}
/*
 * _get_process_data() - Build a table of all current processes
 *
 * IN:	pid.
 *
 * OUT:	none
 *
 * THREADSAFE! Only one thread ever gets here.
 *
 * Assumption:
 *    Any file with a name of the form "/proc/[0-9]+/stat"
 *    is a Linux-style stat entry. We disregard the data if they look
 *    wrong.
 */
static void _get_process_data(void)
{
    struct procsinfo proc;
    pid_t *pids = NULL;
    int npids = 0;
    int i;
    uint32_t total_job_mem = 0, total_job_vsize = 0;
    int pid = 0;
    static int processing = 0;
    prec_t *prec = NULL;
    struct jobacctinfo *jobacct = NULL;
    List prec_list = NULL;
    ListIterator itr;
    ListIterator itr2;

    if (!pgid_plugin && (cont_id == (uint64_t)NO_VAL)) {
        debug("cont_id hasn't been set yet not running poll");
        return;
    }

    if (processing) {
        debug("already running, returning");
        return;
    }
    processing = 1;
    prec_list = list_create(_destroy_prec);

    if (!pgid_plugin) {
        /* get only the processes in the proctrack container */
        slurm_container_get_pids(cont_id, &pids, &npids);
        if (!npids) {
            debug4("no pids in this container %"PRIu64"", cont_id);
            goto finished;
        }
        for (i = 0; i < npids; i++) {
            pid = pids[i];
            if (!getprocs(&proc, sizeof(proc), 0, 0, &pid, 1))
                continue; /* Assume the process went away */
            prec = xmalloc(sizeof(prec_t));
            list_append(prec_list, prec);
            prec->pid = proc.pi_pid;
            prec->ppid = proc.pi_ppid;
            prec->usec = proc.pi_ru.ru_utime.tv_sec +
                proc.pi_ru.ru_utime.tv_usec * 1e-6;
            prec->ssec = proc.pi_ru.ru_stime.tv_sec +
                proc.pi_ru.ru_stime.tv_usec * 1e-6;
            prec->pages = proc.pi_majflt;
            prec->rss = (proc.pi_trss + proc.pi_drss) * pagesize;
            //prec->rss *= 1024;
            prec->vsize = (proc.pi_tsize / 1024);
            prec->vsize += (proc.pi_dvm * pagesize);
            //prec->vsize *= 1024;
            /* debug("vsize = %f = (%d/1024)+(%d*%d)", */
            /*       prec->vsize, proc.pi_tsize, proc.pi_dvm, pagesize); */
        }
    } else {
        while (getprocs(&proc, sizeof(proc), 0, 0, &pid, 1) == 1) {
            prec = xmalloc(sizeof(prec_t));
            list_append(prec_list, prec);
            prec->pid = proc.pi_pid;
            prec->ppid = proc.pi_ppid;
            prec->usec = proc.pi_ru.ru_utime.tv_sec +
                proc.pi_ru.ru_utime.tv_usec * 1e-6;
            prec->ssec = proc.pi_ru.ru_stime.tv_sec +
                proc.pi_ru.ru_stime.tv_usec * 1e-6;
            prec->pages = proc.pi_majflt;
            prec->rss = (proc.pi_trss + proc.pi_drss) * pagesize;
            //prec->rss *= 1024;
            prec->vsize = (proc.pi_tsize / 1024);
            prec->vsize += (proc.pi_dvm * pagesize);
            //prec->vsize *= 1024;
            /* debug("vsize = %f = (%d/1024)+(%d*%d)", */
            /*       prec->vsize, proc.pi_tsize, proc.pi_dvm, pagesize); */
        }
    }

    if (!list_count(prec_list))
        goto finished;

    slurm_mutex_lock(&jobacct_lock);
    if (!task_list || !list_count(task_list)) {
        slurm_mutex_unlock(&jobacct_lock);
        goto finished;
    }

    itr = list_iterator_create(task_list);
    while ((jobacct = list_next(itr))) {
        itr2 = list_iterator_create(prec_list);
        while ((prec = list_next(itr2))) {
            //debug2("pid %d ? %d", prec->ppid, jobacct->pid);
            if (prec->pid == jobacct->pid) {
                /* find all my descendents */
                _get_offspring_data(prec_list, prec, prec->pid);
                /* tally their usage */
                jobacct->max_rss = jobacct->tot_rss =
                    MAX(jobacct->max_rss, (int)prec->rss);
                total_job_mem += jobacct->max_rss;
                jobacct->max_vsize = jobacct->tot_vsize =
                    MAX(jobacct->max_vsize, (int)prec->vsize);
                total_job_vsize += prec->vsize;
                jobacct->max_pages = jobacct->tot_pages =
                    MAX(jobacct->max_pages, prec->pages);
                jobacct->min_cpu = jobacct->tot_cpu =
                    MAX(jobacct->min_cpu, (prec->usec + prec->ssec));
                debug2("%d size now %d %d time %d",
                       jobacct->pid, jobacct->max_rss,
                       jobacct->max_vsize, jobacct->tot_cpu);
                break;
            }
        }
        list_iterator_destroy(itr2);
    }
    list_iterator_destroy(itr);
    slurm_mutex_unlock(&jobacct_lock);

    if (jobacct_mem_limit) {
        debug("Step %u.%u memory used:%u limit:%u KB",
              jobacct_job_id, jobacct_step_id,
              total_job_mem, jobacct_mem_limit);
    }
    if (jobacct_job_id && jobacct_mem_limit &&
        (total_job_mem > jobacct_mem_limit)) {
        if (jobacct_step_id == NO_VAL) {
            error("Job %u exceeded %u KB memory limit, being "
                  "killed", jobacct_job_id, jobacct_mem_limit);
        } else {
            error("Step %u.%u exceeded %u KB memory limit, being "
                  "killed", jobacct_job_id, jobacct_step_id,
                  jobacct_mem_limit);
        }
        _acct_kill_step();
    } else if (jobacct_job_id && jobacct_vmem_limit &&
               (total_job_vsize > jobacct_vmem_limit)) {
        if (jobacct_step_id == NO_VAL) {
            error("Job %u exceeded %u KB virtual memory limit, "
                  "being killed", jobacct_job_id, jobacct_vmem_limit);
        } else {
            error("Step %u.%u exceeded %u KB virtual memory "
                  "limit, being killed", jobacct_job_id,
                  jobacct_step_id, jobacct_vmem_limit);
        }
        _acct_kill_step();
    }

finished:
    list_destroy(prec_list);
    processing = 0;
    return;
}
int sys_getprocs(void)
{
    return getprocs();
}
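/*
 * Several of the snippets above call a no-argument getprocs() syscall that
 * simply reports how many processes exist, but none of them show the kernel
 * side.  Here is one plausible implementation for a stock xv6 kernel,
 * assuming the usual ptable layout from proc.c (struct proc proc[NPROC]
 * guarded by ptable.lock).  This is a sketch, not the original code from any
 * of the variants quoted above.
 */
int getprocs(void)
{
    struct proc *p;
    int count = 0;

    acquire(&ptable.lock);
    for (p = ptable.proc; p < &ptable.proc[NPROC]; p++) {
        if (p->state != UNUSED)     /* count every allocated slot */
            count++;
    }
    release(&ptable.lock);
    return count;
}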