int listProc(struct Process * proc) { printf("%-7d %c ", proc->pid, proc->state); if (TESTOPT(OPT_USER)) { printf("%-10s %-10s ", uidToName(proc->euid), gidToName(proc->egid)); } if (TESTOPT(OPT_IO)) { printf("%-10d %-10d", proc->read_bytes/1024, (proc->write_bytes - proc->cancelled_write_bytes)/1024); } if (TESTOPT(OPT_MEM)) { printf("%-10d %-10d %-10d %-10d %-10d ", proc->size, proc->resident, proc->share, proc->text, proc->data); } printf("%-25s\n", (proc->has_commandline? fullArgv(proc):proc->name)); return 0; }
/*
 * Print the column-header row matching listProc()'s output.
 * Column groups are gated by the same display options so the
 * headings line up with the data rows.  Always returns 0.
 */
int listHeader() {
	printf("%-7s %c ", "PID", 'S');

	if (TESTOPT(OPT_USER)) {
		printf("%-10s %-10s ", "USER", "GROUP");
	}

	if (TESTOPT(OPT_IO)) {
		printf("%-10s %-10s", "READ", "WRITE");
	}

	if (TESTOPT(OPT_MEM)) {
		printf("%-10s %-10s %-10s %-10s %-10s ",
		       "SIZE", "RESIDENT", "SHARE", "TEXT", "DATA");
	}

	printf("%-25s\n", "NAME");
	return 0;
}
int colouriseProcs() { struct Filter *filter; struct Process *proc; for (proc = first_process; (proc->next_process); proc = proc->next_process) { if ( !TESTOPT(OPT_KERNEL) && (proc->size == 0)) { continue; } if (first_filter == NULL ) { proc->colour = 1; continue; } for(filter = first_filter; (filter); filter = filter->next_filter) { if ((filter->pid > 0) && filter->pid == proc->pid) { colourise(proc); continue; } else if ((filter->name) && 0 == strcmp(filter->name, (proc->has_commandline? fullArgv(proc):proc->name))) { colourise(proc); continue; } else if ((filter->regular_expression) && 0 == regexec(&filter->preg, (proc->has_commandline? fullArgv(proc):proc->name), 0, 0, 0)) { colourise(proc); } } } return 0; }
/*
 * Build a printable command line for a process.
 *
 * argv[0] has its directory prefix stripped (basename) when it is an
 * absolute path; with OPT_FULL set, the remaining arguments are
 * appended space-separated.
 *
 * Returns a pointer to static storage: the result is overwritten by
 * the next call and the function is not reentrant.
 *
 * Bug fixed: the previous version wrote with unbounded sprintf()
 * into the 1024-byte static buffer, so a long argument vector could
 * overflow it.  All writes are now bounded with snprintf() and the
 * result is silently truncated when the buffer fills up.
 */
char * fullArgv(struct Process *proc) {
	static char buf[1024];
	size_t len;
	unsigned int i;
	int n;

	/* argv[0]: strip the path component from absolute paths. */
	n = snprintf(buf, sizeof(buf), "%s",
		     (proc->argv[0][0] == '/') ? basename(proc->argv[0])
					       : proc->argv[0]);
	if (n < 0)
		return buf;		/* encoding error; buf is empty */
	len = (size_t)n;
	if (len >= sizeof(buf))
		return buf;		/* truncated; already NUL-terminated */

	if (TESTOPT(OPT_FULL)) {
		for (i = 1; i < proc->argc; i++) {
			n = snprintf(buf + len, sizeof(buf) - len,
				     " %s", proc->argv[i]);
			if (n < 0)
				break;
			len += (size_t)n;
			if (len >= sizeof(buf))
				break;	/* buffer full: stop appending */
		}
	}
	return buf;
}
/*
 * vnstrategy:
 *
 *	Run strategy routine for VN device.  We use VOP_READ/VOP_WRITE calls
 *	for vnode-backed vn's, and the swap_pager_strategy() call for
 *	vm_object-backed vn's.
 */
static int
vnstrategy(struct dev_strategy_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct bio *bio = ap->a_bio;
	struct buf *bp;
	struct bio *nbio;
	int unit;
	struct vn_softc *vn;
	int error;

	unit = dkunit(dev);
	vn = dev->si_drv1;
	KKASSERT(vn != NULL);

	bp = bio->bio_buf;

	IFOPT(vn, VN_DEBUG)
		kprintf("vnstrategy(%p): unit %d\n", bp, unit);

	/* Fail I/O against a device that was never configured. */
	if ((vn->sc_flags & VNF_INITED) == 0) {
		bp->b_error = ENXIO;
		bp->b_flags |= B_ERROR;
		biodone(bio);
		return(0);
	}

	bp->b_resid = bp->b_bcount;

	/*
	 * The vnode device is using disk/slice label support.
	 *
	 * The dscheck() function is called for validating the
	 * slices that exist ON the vnode device itself, and
	 * translate the "slice-relative" block number, again.
	 * dscheck() will call biodone() and return NULL if
	 * we are at EOF or beyond the device size.
	 */

	nbio = bio;

	/*
	 * Use the translated nbio from this point on
	 */
	if (vn->sc_vp && bp->b_cmd == BUF_CMD_FREEBLKS) {
		/*
		 * Freeblks is not handled for vnode-backed elements yet.
		 */
		bp->b_resid = 0;
		/* operation complete */
	} else if (vn->sc_vp) {
		/*
		 * VNODE I/O
		 *
		 * If an error occurs, we set B_ERROR but we do not set
		 * B_INVAL because (for a write anyway), the buffer is
		 * still valid.
		 */
		struct uio auio;
		struct iovec aiov;

		bzero(&auio, sizeof(auio));

		aiov.iov_base = bp->b_data;
		aiov.iov_len = bp->b_bcount;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = nbio->bio_offset;
		auio.uio_segflg = UIO_SYSSPACE;
		if (bp->b_cmd == BUF_CMD_READ)
			auio.uio_rw = UIO_READ;
		else
			auio.uio_rw = UIO_WRITE;
		auio.uio_resid = bp->b_bcount;
		auio.uio_td = curthread;

		/*
		 * Don't use IO_DIRECT here, it really gets in the way
		 * due to typical blocksize differences between the
		 * fs backing the VN device and whatever is running on
		 * the VN device.
		 */
		switch (bp->b_cmd) {
		case (BUF_CMD_READ):
			/* Reads only need a shared vnode lock. */
			vn_lock(vn->sc_vp, LK_SHARED | LK_RETRY);
			error = VOP_READ(vn->sc_vp, &auio, IO_RECURSE,
					 vn->sc_cred);
			break;
		case (BUF_CMD_WRITE):
			vn_lock(vn->sc_vp, LK_EXCLUSIVE | LK_RETRY);
			error = VOP_WRITE(vn->sc_vp, &auio, IO_RECURSE,
					  vn->sc_cred);
			break;
		case (BUF_CMD_FLUSH):
			/* Flush transfers no data; sync the backing vnode. */
			auio.uio_resid = 0;
			vn_lock(vn->sc_vp, LK_EXCLUSIVE | LK_RETRY);
			error = VOP_FSYNC(vn->sc_vp, MNT_WAIT, 0);
			break;
		default:
			/*
			 * NOTE(review): this path never takes the vnode
			 * lock, yet vn_unlock() below runs unconditionally
			 * -- looks like an unlock of an unlocked vnode;
			 * confirm against vn_lock()/vn_unlock() semantics.
			 */
			auio.uio_resid = 0;
			error = 0;
			break;
		}
		vn_unlock(vn->sc_vp);
		bp->b_resid = auio.uio_resid;
		if (error) {
			bp->b_error = error;
			bp->b_flags |= B_ERROR;
		}
		/* operation complete */
	} else if (vn->sc_object) {
		/*
		 * OBJT_SWAP I/O (handles read, write, freebuf)
		 *
		 * We have nothing to do if freeing blocks on a reserved
		 * swap area, otherwise execute the op.
		 */
		if (bp->b_cmd == BUF_CMD_FREEBLKS && TESTOPT(vn, VN_RESERVE)) {
			bp->b_resid = 0;
			/* operation complete */
		} else {
			/* swap pager completes the bio itself. */
			swap_pager_strategy(vn->sc_object, nbio);
			return(0);
			/* NOT REACHED */
		}
	} else {
		/* No backing store configured at all. */
		bp->b_resid = bp->b_bcount;
		bp->b_flags |= B_ERROR | B_INVAL;
		bp->b_error = EINVAL;
		/* operation complete */
	}
	biodone(nbio);
	return(0);
}