/*
 * format_next_process - render the next process from the iterator state
 * in `handle` into the shared output buffer `fmt` and return it.
 * `get_userid` maps a uid to a printable login name.
 *
 * The returned string lives in the static `fmt` buffer and is
 * overwritten by the next call.
 */
char *
format_next_process(caddr_t handle, char *(*get_userid) ())
{
    struct handle *state;
    struct prpsinfo *proc;
    long cpu_secs;

    /* Advance the iterator and remember the entry we are formatting. */
    state = (struct handle *) handle;
    proc = *(state->next_proc++);
    state->remaining--;

    /* Accumulated CPU time (whole seconds) for this process. */
    cpu_secs = proc->pr_time.tv_sec;

    /* Build the display line for this entry. */
    sprintf(fmt, Proc_format,
            proc->pr_pid,
            proc->pr_pgrp,
            (*get_userid) (proc->pr_uid),
            format_prio(proc),
            format_k(pagetok(proc->pr_size)),
            format_k(pagetok(proc->pr_rssize)),
            format_state(proc),
            format_time(cpu_secs),
            clip_percent(weighted_cpu(proc)),
            clip_percent(percent_cpu(proc)),
            printable(proc->pr_fname));

    /* Hand back the shared buffer. */
    return (fmt);
}
/*
 * format_next_process - AIX variant: format the next process named by the
 * iterator in `handle` into the shared `fmt` buffer and return it.
 * `get_userid` translates a uid into a login name.
 *
 * Returns an empty string if the iterator has no entries left.
 */
char *
format_next_process(caddr_t handle, char *(*get_userid)())
{
    struct handle *iter = (struct handle *)handle;
    struct procsinfo *info;
    struct proc *kproc;
    long runtime;
    int vsize, rsize;
    char vunit = 'K';
    char runit = 'K';

    /* Guard: nothing left to format — return an empty line. */
    if (iter->remaining == 0) {
        fmt[0] = '\0';
        return fmt;
    }

    /* Step the iterator to the next process. */
    info = *(iter->next_proc++);
    iter->remaining--;

    /* Companion kernel proc-table slot and total CPU time. */
    kproc = &p_proc[PROCMASK(info->pi_pid)];
    runtime = PROCTIME(info);

    /* Sizes up to 10M are displayed in kilobytes, beyond that in megabytes. */
    vsize = (info->pi_tsize / 1024 + info->pi_dvm) * 4;
    if (vsize > 10240) {
        vsize /= 1024;
        vunit = 'M';
    }
    rsize = (info->pi_trss + info->pi_drss) * 4;
    if (rsize > 10240) {
        rsize /= 1024;
        runit = 'M';
    }

    sprintf(fmt, Proc_format,
            info->pi_pid,                            /* PID */
            (*get_userid)(info->pi_uid),             /* login name */
            getpriority(PRIO_PROCESS, info->pi_pid), /* priority */
            EXTRACT_NICE(kproc),                     /* fixed or vari */
            vsize,                                   /* size */
            vunit,                                   /* K or M */
            rsize,                                   /* resident */
            runit,                                   /* K or M */
            state_abbrev[kproc->p_stat],             /* process state */
            format_time(runtime),                    /* time used */
            weighted_cpu(info),                      /* WCPU */
            100.0 * double_pctcpu(kproc),            /* CPU */
            printable(info->pi_comm),                /* COMM */
            (info->pi_flags & SKPROC) == 0 ? "" : " (sys)" /* kernel process? */
            );

    return (fmt);
}
/*
 * getptable - refresh the process table.
 *
 * Scans the /proc directory (procdir, opened and chdir'd to elsewhere in
 * this module), reads a prpsinfo record for each process into the caller's
 * preallocated array at `baseptr`, and computes per-process CPU percentages
 * by differencing each process's cumulative CPU time against the value saved
 * on the previous scan (kept in the open-addressing hash table at `oldbase`).
 *
 * Side effects: sets the global `nproc` to the number of processes read,
 * and rebuilds the `oldbase`/`oldprocs` hash table for the next call.
 */
/* get process table */
void
getptable(struct prpsinfo * baseptr)
{
    struct prpsinfo *currproc;      /* ptr to current proc struct */
    int i, numprocs;
    struct dirent *direntp;
    struct oldproc *op, *endbase;
    /* statics: these carry timing and decay state between calls */
    static struct timeval lasttime, thistime;
    static double timediff, alpha, beta;

    /* measure time between last call to getptable and current call */
    gettimeofday(&thistime, NULL);

    /*
     * To avoid divides, we keep times in nanoseconds. This is scaled by
     * 1e7 rather than 1e9 so that when we divide we get percent.
     * (tv_sec * 1e7 + tv_usec * 10 yields units of 100ns.)
     */
    timediff = ((double) thistime.tv_sec * 1.0e7 -
                (double) lasttime.tv_sec * 1.0e7) +
               ((double) thistime.tv_usec * 10 -
                (double) lasttime.tv_usec * 10);

    /*
     * Under extreme load conditions, sca has experienced an
     * assert(timediff > 0) failure here. His guess is that sometimes timed
     * resets the time backwards and gettimeofday returns a lower number on
     * a later call. To be on the safe side I fix it here by setting
     * timediff to some arbitrary small value (in nanoseconds).
     */
    if (timediff <= 0.0)
        timediff = 100.0;

    lasttime = thistime;        /* prepare for next round */

    /*
     * Constants for the exponentially decaying average:
     *     avg = alpha * new + beta * avg
     * The goal is 50% decay in 30 sec. However if the sample period is
     * greater than 30 sec, there's not a lot we can do.
     */
    if (timediff < 30.0e7) {
        alpha = 0.5 * (timediff / 15.0e7);
        beta = 1.0 - alpha;
    } else {
        alpha = 0.5;
        beta = 0.5;
    }

    assert(alpha >= 0);
    assert(alpha <= 1);
    assert(beta >= 0);
    assert(beta <= 1);

    endbase = oldbase + oldprocs;
    currproc = baseptr;

    /* Walk /proc; each directory entry names one process. */
    for (numprocs = 0, rewinddir(procdir); direntp = readdir(procdir);) {
        int fd;

        /* Skip entries we cannot open (process may have exited). */
        if ((fd = open(direntp->d_name, O_RDONLY)) < 0)
            continue;

        /* Read the psinfo record into the next free table slot. */
        currproc = baseptr + numprocs;
        if (ioctl(fd, PIOCPSINFO, currproc) < 0) {
            (void) close(fd);
            continue;
        }

        /*
         * SVR4 doesn't keep track of CPU% in the kernel, so we have to do
         * our own. See if we've heard of this process before. If so,
         * compute % based on CPU since last time. The search is a linear
         * probe starting at the pid's hash slot; an oldpid of -1 marks an
         * empty slot (end of probe chain).
         */
        op = oldbase + HASH(currproc->pr_pid);
        for (;;) {
            if (op->oldpid == -1)       /* not there */
                break;
            if (op->oldpid == currproc->pr_pid) {
                /* found old data: % = delta(cpu time) / delta(wall time) */
                percent_cpu(currproc) =
                    ((currproc->pr_time.tv_sec * 1.0e9 +
                      currproc->pr_time.tv_nsec) - op->oldtime) / timediff;
                weighted_cpu(currproc) =
                    op->oldpct * beta + percent_cpu(currproc) * alpha;
                break;
            }
            op++;               /* try next entry in hash table */
            if (op == endbase)  /* table wrap around */
                op = oldbase;
        }

        /* Otherwise, it's new, so use all of its CPU time */
        if (op->oldpid == -1) {
            /*
             * NOTE(review): lasttime was already overwritten with thistime
             * above, so lasttime.tv_sec is nonzero here on every call,
             * including (apparently) the very first — the "first screen"
             * branch below looks unreachable. Preserved as-is; confirm
             * against the original module before changing.
             */
            if (lasttime.tv_sec) {
                percent_cpu(currproc) =
                    (currproc->pr_time.tv_sec * 1.0e9 +
                     currproc->pr_time.tv_nsec) / timediff;
                weighted_cpu(currproc) = percent_cpu(currproc);
            } else {
                /* first screen -- no difference is possible */
                percent_cpu(currproc) = 0.0;
                weighted_cpu(currproc) = 0.0;
            }
        }

#ifdef DO_MAPSIZE
        size(fd, currproc);
#endif
        numprocs++;
        (void) close(fd);

        /*
         * Bug: in case process count grew so dramatically as to exceed the
         * table size. We give up on a full scan. The chances of this to
         * happen are extremely slim due to the big factor we're using.
         * Getting nproc from nlist is not worth the headache. realloc
         * wouldn't work either because we have pointers to the proc table
         * so we cannot move it around.
         */
        if (numprocs >= ptable_size) {
            fprintf(stderr, "preallocated proc table size (%d) exceeded, "
                    "skipping some processes\n", ptable_size);
            break;
        }
    }
    nproc = numprocs;

    /*
     * Save current CPU time for next time around. For the moment recreate
     * the hash table each time, as the code is easier that way. The table
     * is sized at twice the process count so linear probing stays cheap
     * and the empty-slot sentinel search always terminates.
     */
    oldprocs = 2 * nproc;
    endbase = oldbase + oldprocs;
    for (op = oldbase; op < endbase; op++)
        op->oldpid = -1;        /* mark every slot empty */
    for (i = 0, currproc = baseptr; i < nproc; i++, currproc++) {
        /* find an empty spot */
        op = oldbase + HASH(currproc->pr_pid);
        for (;;) {
            if (op->oldpid == -1)
                break;
            op++;
            if (op == endbase)
                op = oldbase;
        }
        /* record pid, cumulative cpu time (ns) and weighted % for next scan */
        op->oldpid = currproc->pr_pid;
        op->oldtime = (currproc->pr_time.tv_sec * 1.0e9 +
                       currproc->pr_time.tv_nsec);
        op->oldpct = weighted_cpu(currproc);
    }
}