/*
 * Build proc info array by reading in proc list from a crash dump.
 * We reallocate kd->procbase as necessary.
 *
 * Returns the total number of kinfo_proc entries written starting at
 * kd->procbase, or -1 (with the error recorded via _kvm_err) if either
 * list head cannot be read or the live-process walk fails.
 */
static int
kvm_deadprocs(kvm_t *kd, int what, int arg, u_long a_allproc,
    u_long a_zombproc)
{
	struct kinfo_proc *bp = kd->procbase;
	int acnt, zcnt;
	struct proc *p;

	/* Fetch the head of the live-process list and convert it. */
	if (KREAD(kd, a_allproc, &p)) {
		_kvm_err(kd, kd->program, "cannot read allproc");
		return (-1);
	}
	acnt = kvm_proclist(kd, what, arg, p, bp);
	if (acnt < 0)
		return (acnt);

	/* Zombies are appended right after the live processes. */
	if (KREAD(kd, a_zombproc, &p)) {
		_kvm_err(kd, kd->program, "cannot read zombproc");
		return (-1);
	}
	zcnt = kvm_proclist(kd, what, arg, p, bp + acnt);
	if (zcnt < 0)
		zcnt = 0;	/* a bad zombie list is deliberately non-fatal */

	return (acnt + zcnt);
}
/*
 * Fill in *kf for an NTFS vnode by pulling the backing fnode, ntnode
 * and ntfsmount out of the kernel image.  Returns 0 on success, -1 on
 * any failed kernel read (error recorded via _kvm_err).
 */
int
_kvm_stat_ntfs(kvm_t *kd, struct kinfo_file *kf, struct vnode *vp)
{
	struct fnode fnode;
	struct ntnode node;
	struct ntfsmount mp;

	/*
	 * Reaching the ntnode takes two hops: fetch the fnode first,
	 * then follow its pointer to the ntnode proper.
	 */
	if (KREAD(kd, (u_long)VTOF(vp), &fnode)) {
		_kvm_err(kd, kd->program, "can't read fnode at %p", VTOF(vp));
		return (-1);
	}
	if (KREAD(kd, (u_long)FTONT(&fnode), &node)) {
		_kvm_err(kd, kd->program, "can't read ntnode at %p",
		    FTONT(&fnode));
		return (-1);
	}
	/* The mount carries the permission bits for the whole volume. */
	if (KREAD(kd, (u_long)node.i_mp, &mp)) {
		_kvm_err(kd, kd->program, "can't read ntfsmount at %p",
		    node.i_mp);
		return (-1);
	}

	kf->va_fsid = node.i_dev & 0xffff;
	kf->va_fileid = (long)node.i_number;
	kf->va_mode = (mode_t)mp.ntm_mode | _kvm_getftype(vp->v_type);
	kf->va_size = fnode.f_size;
	kf->va_rdev = 0;	/* XXX */

	return (0);
}
/*
 * Initialize virtual-to-physical translation state for a dead kernel:
 * locate Sysmap/Sysmapsize/uvmexp via the namelist and cache the page
 * size parameters.  Returns 0 on success, -1 on error.
 */
int
_kvm_initvtop(kvm_t *kd)
{
	struct vmstate *vm;
	struct nlist nl[4];
	struct uvmexp uvmexp;

	vm = (struct vmstate *)_kvm_malloc(kd, sizeof(*vm));
	if (vm == 0)
		return (-1);
	kd->vmst = vm;

	nl[0].n_name = "Sysmap";
	nl[1].n_name = "Sysmapsize";
	nl[2].n_name = "uvmexp";
	nl[3].n_name = 0;

	if (kvm_nlist(kd, nl) != 0) {
		_kvm_err(kd, kd->program, "bad namelist");
		return (-1);
	}
	if (KREAD(kd, (u_long)nl[0].n_value, &vm->Sysmap)) {
		_kvm_err(kd, kd->program, "cannot read Sysmap");
		return (-1);
	}
	if (KREAD(kd, (u_long)nl[1].n_value, &vm->Sysmapsize)) {
		_kvm_err(kd, kd->program, "cannot read Sysmapsize");
		return (-1);
	}
	/*
	 * We are only interested in the first three fields of struct
	 * uvmexp, so do not try to read more than necessary (especially
	 * in case the layout changes).
	 */
	if (kvm_read(kd, (u_long)nl[2].n_value, &uvmexp,
	    3 * sizeof(int)) != 3 * sizeof(int)) {
		_kvm_err(kd, kd->program, "cannot read uvmexp");
		return (-1);
	}
	vm->pagesize = uvmexp.pagesize;
	vm->pagemask = uvmexp.pagemask;
	vm->pageshift = uvmexp.pageshift;

	/*
	 * Older kernels might not have this symbol; in which case
	 * we use the value of VM_MIN_KERNEL_ADDRESS they must have.
	 * (Failure here is deliberately not an error.)
	 */
	nl[0].n_name = "Sysmapbase";
	nl[1].n_name = 0;
	if (kvm_nlist(kd, nl) != 0 ||
	    KREAD(kd, (u_long)nl[0].n_value, &vm->Sysmapbase))
		vm->Sysmapbase = (vaddr_t)CKSSEG_BASE;
	return (0);
}
/*
 * Convert credentials located in kernel space address 'cred' and store
 * them in the appropriate members of 'eproc'.
 * Returns 0 on success, -1 if the kauth cred cannot be read.
 */
static int
_kvm_convertcred(kvm_t *kd, u_long cred, struct eproc *eproc)
{
	struct kvm_kauth_cred kc;
	struct ki_pcred *pcred = &eproc->e_pcred;
	struct ki_ucred *ucred = &eproc->e_ucred;
	uint32_t ngroups;

	if (KREAD(kd, cred, &kc) != 0)
		return (-1);

	/* Equivalent of kauth_cred_to_pcred(); see kauth(9). */
	pcred->p_ruid = kc.cr_uid;
	pcred->p_svuid = kc.cr_svuid;
	pcred->p_rgid = kc.cr_gid;
	pcred->p_svgid = kc.cr_svgid;
	pcred->p_refcnt = kc.cr_refcnt;
	pcred->p_pad = NULL;

	/*
	 * Equivalent of kauth_cred_to_ucred(); see kauth(9).
	 * Clamp the group count to what ki_ucred can hold.
	 */
	ngroups = (uint32_t)MIN(kc.cr_ngroups,
	    sizeof(ucred->cr_groups) / sizeof(ucred->cr_groups[0]));
	ucred->cr_ref = kc.cr_refcnt;
	ucred->cr_uid = kc.cr_euid;
	ucred->cr_gid = kc.cr_egid;
	ucred->cr_ngroups = ngroups;
	memcpy(ucred->cr_groups, kc.cr_groups,
	    ngroups * sizeof(ucred->cr_groups[0]));
	return (0);
}
/*
 * If the current element is the left side of the parent the next element
 * will be a left side traversal of the parent's right side.  If the parent
 * has no right side the next element will be the parent.
 *
 * If the current element is the right side of the parent the next element
 * is the parent.
 *
 * If the parent is NULL we are done.
 *
 * Returns the kernel address of the next lwp (with *lwp overwritten by
 * intermediate reads), 0 at end of traversal, or (uintptr_t)-1 on a
 * failed kernel read.
 */
static uintptr_t
kvm_nextlwp(kvm_t *kd, uintptr_t lwppos, struct lwp *lwp, struct proc *proc)
{
	uintptr_t nextpos;

	/* Kernel address of the parent node; 0 means we are at the root. */
	nextpos = (uintptr_t)lwp->u.lwp_rbnode.rbe_parent;
	if (nextpos) {
		/* Replace *lwp with the parent's contents. */
		if (KREAD(kd, nextpos, lwp)) {
			_kvm_err(kd, kd->program, "can't read lwp at %p",
			    (void *)lwppos);
			return ((uintptr_t)-1);
		}
		/* Compare our old position against the parent's children. */
		if (lwppos == (uintptr_t)lwp->u.lwp_rbnode.rbe_left) {
			/*
			 * If we had gone down the left side the next element
			 * is a left hand traversal of the parent's right
			 * side, or the parent itself if there is no right
			 * side.
			 */
			lwppos = (uintptr_t)lwp->u.lwp_rbnode.rbe_right;
			if (lwppos)
				nextpos = kvm_lwptraverse(kd, lwp, lwppos);
		} else {
			/*
			 * If we had gone down the right side the next
			 * element is the parent.
			 */
			/* nextpos = nextpos */
		}
	}
	return(nextpos);
}
static void getinfo(struct Info *stats) { int mib[2]; size_t size; int i; cpureadstats(); drvreadstats(); size = sizeof(stats->nchstats); if (sysctlbyname("vfs.namecache_stats", &stats->nchstats, &size, NULL, 0) < 0) { error("can't get namecache statistics: %s\n", strerror(errno)); memset(&stats->nchstats, 0, sizeof(stats->nchstats)); } if (nintr) NREAD(X_INTRCNT, stats->intrcnt, nintr * LONG); for (i = 0; i < nevcnt; i++) KREAD(ie_head[i].ie_count, &stats->evcnt[i], sizeof stats->evcnt[i]); size = sizeof(stats->uvmexp); mib[0] = CTL_VM; mib[1] = VM_UVMEXP2; if (sysctl(mib, 2, &stats->uvmexp, &size, NULL, 0) < 0) { error("can't get uvmexp: %s\n", strerror(errno)); memset(&stats->uvmexp, 0, sizeof(stats->uvmexp)); } size = sizeof(stats->Total); mib[0] = CTL_VM; mib[1] = VM_METER; if (sysctl(mib, 2, &stats->Total, &size, NULL, 0) < 0) { error("Can't get kernel info: %s\n", strerror(errno)); memset(&stats->Total, 0, sizeof(stats->Total)); } }
/*
 * Fill in *kf for a UDF vnode by reading the unode, its file_entry and
 * the owning umount from the kernel image.  Returns 0 on success, -1 on
 * any failed kernel read (error recorded via _kvm_err).
 */
int
_kvm_stat_udf(kvm_t *kd, struct kinfo_file *kf, struct vnode *vp)
{
	struct unode up;
	struct file_entry fentry;
	struct umount um;

	if (KREAD(kd, (u_long)VTOU(vp), &up)) {
		_kvm_err(kd, kd->program, "can't read unode at %p", VTOU(vp));
		return (-1);
	}
	if (KREAD(kd, (u_long)up.u_fentry, &fentry)) {
		_kvm_err(kd, kd->program, "can't read file_entry at %p",
		    up.u_fentry);
		return (-1);
	}
	if (KREAD(kd, (u_long)up.u_ump, &um)) {
		_kvm_err(kd, kd->program, "can't read umount at %p",
		    up.u_ump);
		return (-1);
	}

	kf->va_fsid = up.u_dev;
	kf->va_fileid = (long)up.u_ino;
	kf->va_mode = udf_permtomode(&up); /* XXX */
	kf->va_rdev = 0;

	/*
	 * BUG FIX: v_type is an enumeration, not a bit mask.  The old
	 * test `vp->v_type & VDIR' also matched unrelated types whose
	 * enum values happen to share bits with VDIR (e.g. VBLK).
	 * Compare for equality instead.
	 */
	if (vp->v_type == VDIR) {
		/*
		 * Directories that are recorded within their ICB will show
		 * as having 0 blocks recorded.  Since tradition dictates
		 * that directories consume at least one logical block,
		 * make it appear so.
		 */
		if (fentry.logblks_rec != 0) {
			kf->va_size =
			    letoh64(fentry.logblks_rec) * um.um_bsize;
		} else {
			kf->va_size = um.um_bsize;
		}
	} else {
		kf->va_size = letoh64(fentry.inf_len);
	}

	return (0);
}
/*
 * Get file structures from a dead kernel.
 *
 * Copies the filelist head followed by one struct file per open file
 * into kd->argspc.  Returns nfiles on success, 0 on any failure (error
 * recorded via _kvm_err).
 *
 * BUG FIXES vs. the previous version:
 *  - If the buffer could not even hold the filehead, the old code fell
 *    through and iterated over an *uninitialized* filehead (undefined
 *    behavior).  We now bail out early.
 *  - If the buffer filled up mid-list, the old loop kept following
 *    f_list.le_next through a raw kernel pointer; we now stop cleanly.
 */
static int
kvm_deadfiles(kvm_t *kd, int op, int arg, long filehead_o, int nfiles)
{
	int buflen = kd->arglen, n = 0;
	struct file *fp;
	char *where = kd->argspc;
	struct filelist filehead;

	/*
	 * first copyout filehead; refuse to continue if it does not fit,
	 * since the loop below depends on its contents.
	 */
	if (buflen <= (int)sizeof(filehead))
		return (0);
	if (KREAD(kd, filehead_o, &filehead)) {
		_kvm_err(kd, kd->program, "can't read filehead");
		return (0);
	}
	buflen -= sizeof(filehead);
	where += sizeof(filehead);
	*(struct filelist *)kd->argspc = filehead;

	/*
	 * followed by an array of file structures.  Each struct is read
	 * into the buffer and the list link is then taken from the local
	 * copy, never from the raw kernel pointer.
	 */
	for (fp = filehead.lh_first; fp != NULL; fp = fp->f_list.le_next) {
		if (buflen <= (int)sizeof(struct file))
			break;	/* out of room; stop instead of wild read */
		if (KREAD(kd, (long)fp, ((struct file *)where))) {
			_kvm_err(kd, kd->program, "can't read kfp");
			return (0);
		}
		buflen -= sizeof(struct file);
		fp = (struct file *)where;
		where += sizeof(struct file);
		n++;
	}
	if (n != nfiles) {
		_kvm_err(kd, kd->program, "inconsistent nfiles");
		return (0);
	}
	return (nfiles);
}
/*
 * Read nbytes from the kernel image at virtual address addr into buf.
 * Returns the number of bytes read, or -1 with the error recorded in
 * kd->errbuf.
 */
static ssize_t
kvm_read(kvm_t *kd, unsigned long addr, void *buf, size_t nbytes)
{
	ssize_t nread;

	/* Position the image file at the offset backing this address. */
	if (lseek(kd->fd, KOFFSET(addr), SEEK_SET) == -1) {
		_kvm_error(kd->errbuf, NULL);
		return -1;
	}
	nread = KREAD(kd->fd, buf, nbytes, addr);
	if (nread < 0) {
		_kvm_error(kd->errbuf, NULL);
		return -1;
	}
	return nread;
}
static void get_interrupt_events(void) { struct evcntlist allevents; struct evcnt evcnt, *evptr; intr_evcnt_t *ie; intr_evcnt_t *n; if (!NREAD(X_ALLEVENTS, &allevents, sizeof allevents)) return; evptr = TAILQ_FIRST(&allevents); for (; evptr != NULL; evptr = TAILQ_NEXT(&evcnt, ev_list)) { if (!KREAD(evptr, &evcnt, sizeof evcnt)) return; if (evcnt.ev_type != EVCNT_TYPE_INTR) continue; n = realloc(ie_head, sizeof *ie * (nevcnt + 1)); if (n == NULL) { error("realloc failed"); die(0); } ie_head = n; ie = ie_head + nevcnt; ie->ie_group = malloc(evcnt.ev_grouplen + 1); ie->ie_name = malloc(evcnt.ev_namelen + 1); if (ie->ie_group == NULL || ie->ie_name == NULL) return; if (!KREAD(evcnt.ev_group, ie->ie_group, evcnt.ev_grouplen + 1)) return; if (!KREAD(evcnt.ev_name, ie->ie_name, evcnt.ev_namelen + 1)) return; ie->ie_count = &evptr->ev_count; ie->ie_loc = 0; nevcnt++; } }
/*
 * Helper routine which traverses the left hand side of a red-black
 * sub-tree, reading each node from the kernel into *lwp.  Returns the
 * kernel address of the leftmost node, or (uintptr_t)-1 on read error.
 */
static uintptr_t
kvm_lwptraverse(kvm_t *kd, struct lwp *lwp, uintptr_t lwppos)
{
	uintptr_t left;

	for (;;) {
		if (KREAD(kd, lwppos, lwp)) {
			_kvm_err(kd, kd->program, "can't read lwp at %p",
			    (void *)lwppos);
			return ((uintptr_t)-1);
		}
		left = (uintptr_t)lwp->u.lwp_rbnode.rbe_left;
		if (left == 0)
			return (lwppos);
		lwppos = left;
	}
}
/*
 * kvm_getloadavg() -- Get system load averages, from live or dead kernels.
 *
 * Put `nelem' samples into `loadavg' array.
 * Return number of samples retrieved, or -1 on error.
 */
int
kvm_getloadavg(kvm_t *kd, double loadavg[], int nelem)
{
	struct loadavg loadinfo;
	struct nlist *p;
	int fscale, i;

	/* Live kernels can use the straightforward libc interface. */
	if (ISALIVE(kd))
		return (getloadavg(loadavg, nelem));

	if (kvm_nlist(kd, nl) != 0) {
		/* Scan to the namelist terminator before reporting. */
		for (p = nl; p->n_type != 0; ++p);
		_kvm_err(kd, kd->program, "%s: no such symbol", p->n_name);
		return (-1);
	}

/* KREAD reads a fixed-size object; nonzero means a short/failed read. */
#define KREAD(kd, addr, obj) \
	(kvm_read(kd, addr, (char *)(obj), sizeof(*obj)) != sizeof(*obj))
	if (KREAD(kd, nl[X_AVERUNNABLE].n_value, &loadinfo)) {
		_kvm_err(kd, kd->program, "can't read averunnable");
		return (-1);
	}

	/*
	 * Old kernels have fscale separately; if not found assume
	 * running new format.
	 */
	if (!KREAD(kd, nl[X_FSCALE].n_value, &fscale))
		loadinfo.fscale = fscale;

	nelem = MIN(nelem, (int)(sizeof(loadinfo.ldavg) / sizeof(fixpt_t)));
	/* Convert the fixed-point kernel samples to doubles. */
	for (i = 0; i < nelem; i++)
		loadavg[i] = (double) loadinfo.ldavg[i] / loadinfo.fscale;
	return (nelem);
}
/*
 * Fill in *kf for a cd9660 (ISO-9660) vnode from its in-kernel iso_node.
 * Returns 0 on success, -1 if the inode cannot be read.
 */
int
_kvm_stat_cd9660(kvm_t *kd, struct kinfo_file *kf, struct vnode *vp)
{
	struct iso_node node;

	if (KREAD(kd, (u_long)VTOI(vp), &node)) {
		_kvm_err(kd, kd->program, "can't read inode at %p", VTOI(vp));
		return (-1);
	}

	/* Copy the fields userland cares about out of the node. */
	kf->va_fsid = node.i_dev & 0xffff;
	kf->va_fileid = (long)node.i_number;
	kf->va_mode = node.inode.iso_mode;
	kf->va_size = node.i_size;
	kf->va_rdev = node.i_dev;

	return (0);
}
/*
 * Build proc info array by reading in proc list from a crash dump.
 * We reallocate kd->procbase as necessary.
 *
 * The allproc table is a hash array of allproc_hsize proclist heads
 * starting at a_allproc; each non-empty bucket is handed to
 * kvm_proclist().  Returns the total entry count or -1 on error.
 *
 * BUG FIXES vs. the previous version:
 *  - each proclist head was heap-allocated without checking the
 *    _kvm_malloc() result, and the pointer array itself (plus the head
 *    for every empty bucket) was leaked; a single stack copy needs no
 *    allocation at all.
 *  - the error message reported a_allproc instead of the offset of the
 *    bucket that actually failed.
 */
static int
kvm_deadprocs(kvm_t *kd, int what, int arg, u_long a_allproc,
    int allproc_hsize)
{
	struct kinfo_proc *bp;
	struct proclist pl;
	struct proc *p;
	u_long nextoff;
	int cnt, partcnt, n;

	cnt = 0;
	for (n = 0; n < allproc_hsize; n++) {
		/* KREAD() the n-th bucket head into our stack copy. */
		nextoff = a_allproc + (n * sizeof(struct proclist));
		if (KREAD(kd, (u_long)nextoff, &pl)) {
			_kvm_err(kd, kd->program,
			    "can't read proclist at 0x%lx", nextoff);
			return (-1);
		}

		/* Ignore empty proclists */
		if (LIST_EMPTY(&pl))
			continue;

		bp = kd->procbase + cnt;
		p = pl.lh_first;
		partcnt = kvm_proclist(kd, what, arg, p, bp);
		if (partcnt < 0)
			return (partcnt);
		cnt += partcnt;
	}
	return (cnt);
}
/*
 * Snapshot the kernel TCP statistics into newstat and fold each counter
 * into curstat as a delta against oldstat (ADJINETCTR handles the
 * run/boot display modes).  When showing rates, oldstat is advanced to
 * the new snapshot.
 */
void
fetchtcp(void)
{
	/* Read the whole tcpstat structure in one go. */
	KREAD((void *)namelist[0].n_value, &newstat, sizeof(newstat));

	ADJINETCTR(curstat, oldstat, newstat, tcps_connattempt);
	ADJINETCTR(curstat, oldstat, newstat, tcps_accepts);
	ADJINETCTR(curstat, oldstat, newstat, tcps_connects);
	ADJINETCTR(curstat, oldstat, newstat, tcps_drops);
	ADJINETCTR(curstat, oldstat, newstat, tcps_conndrops);
	ADJINETCTR(curstat, oldstat, newstat, tcps_timeoutdrop);
	ADJINETCTR(curstat, oldstat, newstat, tcps_keepdrops);
	ADJINETCTR(curstat, oldstat, newstat, tcps_persistdrops);
	ADJINETCTR(curstat, oldstat, newstat, tcps_segstimed);
	ADJINETCTR(curstat, oldstat, newstat, tcps_rttupdated);
	ADJINETCTR(curstat, oldstat, newstat, tcps_delack);
	ADJINETCTR(curstat, oldstat, newstat, tcps_rexmttimeo);
	ADJINETCTR(curstat, oldstat, newstat, tcps_persisttimeo);
	ADJINETCTR(curstat, oldstat, newstat, tcps_keepprobe);
	ADJINETCTR(curstat, oldstat, newstat, tcps_keeptimeo);
	ADJINETCTR(curstat, oldstat, newstat, tcps_sndtotal);
	ADJINETCTR(curstat, oldstat, newstat, tcps_sndpack);
	ADJINETCTR(curstat, oldstat, newstat, tcps_sndrexmitpack);
	ADJINETCTR(curstat, oldstat, newstat, tcps_sndacks);
	ADJINETCTR(curstat, oldstat, newstat, tcps_sndprobe);
	ADJINETCTR(curstat, oldstat, newstat, tcps_sndwinup);
	ADJINETCTR(curstat, oldstat, newstat, tcps_sndurg);
	ADJINETCTR(curstat, oldstat, newstat, tcps_sndctrl);
	ADJINETCTR(curstat, oldstat, newstat, tcps_rcvtotal);
	ADJINETCTR(curstat, oldstat, newstat, tcps_rcvpack);
	ADJINETCTR(curstat, oldstat, newstat, tcps_rcvduppack);
	ADJINETCTR(curstat, oldstat, newstat, tcps_rcvpartduppack);
	ADJINETCTR(curstat, oldstat, newstat, tcps_rcvoopack);
	ADJINETCTR(curstat, oldstat, newstat, tcps_rcvdupack);
	ADJINETCTR(curstat, oldstat, newstat, tcps_rcvackpack);
	ADJINETCTR(curstat, oldstat, newstat, tcps_rcvwinprobe);
	ADJINETCTR(curstat, oldstat, newstat, tcps_rcvwinupd);

	/* In rate mode the new snapshot becomes the baseline. */
	if (update == UPDATE_TIME)
		memcpy(&oldstat, &newstat, sizeof(oldstat));
}
/*
 * Snapshot the kernel IPv6 statistics into newstat and fold each counter
 * into curstat as a delta against oldstat (ADJINETCTR handles the
 * run/boot display modes).  When showing rates, oldstat is advanced to
 * the new snapshot.
 */
void
fetchip6(void)
{
	/* Read the whole ip6stat structure in one go. */
	KREAD((void *)namelist[0].n_value, &newstat, sizeof(newstat));

	ADJINETCTR(curstat, oldstat, newstat, ip6s_total);
	ADJINETCTR(curstat, oldstat, newstat, ip6s_toosmall);
	ADJINETCTR(curstat, oldstat, newstat, ip6s_tooshort);
	ADJINETCTR(curstat, oldstat, newstat, ip6s_badoptions);
	ADJINETCTR(curstat, oldstat, newstat, ip6s_badvers);
	ADJINETCTR(curstat, oldstat, newstat, ip6s_exthdrtoolong);
	ADJINETCTR(curstat, oldstat, newstat, ip6s_delivered);
	ADJINETCTR(curstat, oldstat, newstat, ip6s_notmember);
	ADJINETCTR(curstat, oldstat, newstat, ip6s_toomanyhdr);
	ADJINETCTR(curstat, oldstat, newstat, ip6s_nogif);
	ADJINETCTR(curstat, oldstat, newstat, ip6s_fragments);
	ADJINETCTR(curstat, oldstat, newstat, ip6s_fragdropped);
	ADJINETCTR(curstat, oldstat, newstat, ip6s_fragtimeout);
	ADJINETCTR(curstat, oldstat, newstat, ip6s_fragoverflow);
	ADJINETCTR(curstat, oldstat, newstat, ip6s_reassembled);
	ADJINETCTR(curstat, oldstat, newstat, ip6s_m1);
	ADJINETCTR(curstat, oldstat, newstat, ip6s_mext1);
	ADJINETCTR(curstat, oldstat, newstat, ip6s_mext2m);
	ADJINETCTR(curstat, oldstat, newstat, ip6s_forward);
	ADJINETCTR(curstat, oldstat, newstat, ip6s_cantforward);
	ADJINETCTR(curstat, oldstat, newstat, ip6s_redirectsent);
	ADJINETCTR(curstat, oldstat, newstat, ip6s_localout);
	ADJINETCTR(curstat, oldstat, newstat, ip6s_rawout);
	ADJINETCTR(curstat, oldstat, newstat, ip6s_odropped);
	ADJINETCTR(curstat, oldstat, newstat, ip6s_noroute);
	ADJINETCTR(curstat, oldstat, newstat, ip6s_fragmented);
	ADJINETCTR(curstat, oldstat, newstat, ip6s_ofragments);
	ADJINETCTR(curstat, oldstat, newstat, ip6s_cantfrag);
	ADJINETCTR(curstat, oldstat, newstat, ip6s_badscope);

	/* In rate mode the new snapshot becomes the baseline. */
	if (update == UPDATE_TIME)
		memcpy(&oldstat, &newstat, sizeof(oldstat));
}
/*
 * Get file structures from a dead kernel by walking allproc and each
 * process's open-file table.  Copies one struct file per open file into
 * kd->argspc and returns the count, or 0 on failure.
 *
 * BUG FIXES vs. the previous version:
 *  - `ofiles' was used uninitialized: it was free()d before the first
 *    allocation and on the `fail' path (undefined behavior).  It now
 *    starts as NULL.
 *  - `p->p_fd' dereferenced a *kernel* pointer in our address space;
 *    the local copy `proc.p_fd' must be used instead.
 *  - a KREADN() failure returned without freeing `ofiles'; it now goes
 *    through the common cleanup path.
 */
static int
kvm_deadfiles(kvm_t *kd, int op __unused, int arg __unused, long allproc_o,
    int nprocs __unused)
{
	struct proc proc;
	struct filedesc filed;
	int buflen = kd->arglen, ocnt = 0, n = 0, once = 0, i;
	struct file **ofiles = NULL;
	struct file *fp;
	struct proc *p;
	char *where = kd->argspc;

	if (buflen < (int)(sizeof(struct file *) + sizeof(struct file)))
		return (0);
	if (KREAD(kd, allproc_o, &p)) {
		_kvm_err(kd, kd->program, "cannot read allproc");
		return (0);
	}
	for (; p != NULL; p = LIST_NEXT(&proc, p_list)) {
		if (KREAD(kd, (u_long)p, &proc)) {
			_kvm_err(kd, kd->program, "can't read proc at %p", p);
			goto fail;
		}
		if (proc.p_state == PRS_NEW)
			continue;
		if (proc.p_fd == NULL)
			continue;
		if (KREAD(kd, (u_long)proc.p_fd, &filed)) {
			_kvm_err(kd, kd->program,
			    "can't read filedesc at %p", proc.p_fd);
			goto fail;
		}
		/* Grow the descriptor-pointer scratch array as needed. */
		if (filed.fd_lastfile + 1 > ocnt) {
			ocnt = filed.fd_lastfile + 1;
			free(ofiles);
			ofiles = (struct file **)_kvm_malloc(kd,
			    ocnt * sizeof(struct file *));
			if (ofiles == NULL)
				return (0);
		}
		if (KREADN(kd, (u_long)filed.fd_ofiles, ofiles,
		    ocnt * sizeof(struct file *))) {
			_kvm_err(kd, kd->program,
			    "can't read ofiles at %p", filed.fd_ofiles);
			goto fail;
		}
		for (i = 0; i <= filed.fd_lastfile; i++) {
			if ((fp = ofiles[i]) == NULL)
				continue;
			/*
			 * copyout filehead (legacy)
			 */
			if (!once) {
				*(struct file **)kd->argspc = fp;
				*(struct file **)where = fp;
				buflen -= sizeof (fp);
				where += sizeof (fp);
				once = 1;
			}
			if (buflen < (int)sizeof(struct file))
				goto fail;
			if (KREAD(kd, (long)fp, ((struct file *)where))) {
				_kvm_err(kd, kd->program, "can't read kfp");
				goto fail;
			}
			buflen -= sizeof (struct file);
			fp = (struct file *)where;
			where += sizeof (struct file);
			n++;
		}
	}
	free(ofiles);
	return (n);

fail:
	free(ofiles);
	return (0);
}
/*
 * Return the open-file table for a live or dead kernel.
 *
 * NOTE: this interface is deliberately disabled -- the unconditional
 * error + return at the top makes everything after it unreachable.  The
 * remaining code is kept only as a reference for the old behavior.
 */
char *
kvm_getfiles(kvm_t *kd, int op, int arg, int *cnt)
{
	int mib[2], st, n, nfiles, nprocs;
	size_t size;

	/* Hard-disabled: always report failure to the caller. */
	_kvm_syserr(kd, kd->program, "kvm_getfiles has been broken for years");
	return (0);

	/* --- unreachable from here on --- */
	if (ISALIVE(kd)) {
		size = 0;
		mib[0] = CTL_KERN;
		mib[1] = KERN_FILE;
		st = sysctl(mib, 2, NULL, &size, NULL, 0);
		if (st == -1) {
			_kvm_syserr(kd, kd->program, "kvm_getfiles");
			return (0);
		}
		if (kd->argspc == 0)
			kd->argspc = (char *)_kvm_malloc(kd, size);
		else if (kd->arglen < (int)size)
			kd->argspc = (char *)_kvm_realloc(kd, kd->argspc, size);
		if (kd->argspc == 0)
			return (0);
		kd->arglen = size;
		st = sysctl(mib, 2, kd->argspc, &size, NULL, 0);
		if (st != 0) {
			_kvm_syserr(kd, kd->program, "kvm_getfiles");
			return (0);
		}
		nfiles = size / sizeof(struct xfile);
	} else {
		struct nlist nl[4], *p;

		nl[0].n_name = "_allproc";
		nl[1].n_name = "_nprocs";
		nl[2].n_name = "_nfiles";
		nl[3].n_name = 0;
		if (kvm_nlist(kd, nl) != 0) {
			for (p = nl; p->n_type != 0; ++p)
				;
			_kvm_err(kd, kd->program,
				 "%s: no such symbol", p->n_name);
			return (0);
		}
		if (KREAD(kd, nl[1].n_value, &nprocs)) {
			_kvm_err(kd, kd->program, "can't read nprocs");
			return (0);
		}
		if (KREAD(kd, nl[2].n_value, &nfiles)) {
			_kvm_err(kd, kd->program, "can't read nfiles");
			return (0);
		}
		size = sizeof(void *) + (nfiles + 10) * sizeof(struct file);
		if (kd->argspc == 0)
			kd->argspc = (char *)_kvm_malloc(kd, size);
		else if (kd->arglen < (int)size)
			kd->argspc = (char *)_kvm_realloc(kd, kd->argspc, size);
		if (kd->argspc == 0)
			return (0);
		kd->arglen = size;
		n = kvm_deadfiles(kd, op, arg, nl[0].n_value, nprocs);
		if (n != nfiles) {
			_kvm_err(kd, kd->program, "inconsistant nfiles");
			return (0);
		}
		nfiles = n;
	}
	*cnt = nfiles;
	return (kd->argspc);
}
/*
 * Return the open-file table, via sysctl(KERN_FILE) on a live host or
 * by reading _filehead/_nfiles from a dead kernel.  On success stores
 * the file count in *cnt and returns kd->argspc; returns 0 on failure.
 *
 * BUG FIX: the dead-kernel branch had its namelist indices swapped --
 * it read the integer `nfiles' from the _filehead symbol (nl[0]) and
 * then passed the address of _nfiles (nl[1]) to kvm_deadfiles() as the
 * list head.  The count lives at nl[1] and the head at nl[0].
 */
char *
kvm_getfiles(kvm_t *kd, int op, int arg, int *cnt)
{
	int mib[2], st, nfiles;
	size_t size;
	struct file *fp, *fplim;
	struct filelist filehead;

	if (kvm_ishost(kd)) {
		size = 0;
		mib[0] = CTL_KERN;
		mib[1] = KERN_FILE;
		st = sysctl(mib, 2, NULL, &size, NULL, 0);
		if (st == -1) {
			_kvm_syserr(kd, kd->program, "kvm_getfiles");
			return (0);
		}
		if (kd->argspc == 0)
			kd->argspc = (char *)_kvm_malloc(kd, size);
		else if (kd->arglen < size)
			kd->argspc = (char *)_kvm_realloc(kd, kd->argspc, size);
		if (kd->argspc == 0)
			return (0);
		kd->arglen = size;
		st = sysctl(mib, 2, kd->argspc, &size, NULL, 0);
		if (st == -1 || size < sizeof(filehead)) {
			_kvm_syserr(kd, kd->program, "kvm_getfiles");
			return (0);
		}
		/* Count the entries by walking the copied-out list. */
		filehead = *(struct filelist *)kd->argspc;
		fp = (struct file *)(kd->argspc + sizeof (filehead));
		fplim = (struct file *)(kd->argspc + size);
		for (nfiles = 0; filehead.lh_first && (fp < fplim);
		    nfiles++, fp++)
			filehead.lh_first = fp->f_list.le_next;
	} else {
		struct nlist nl[3], *p;

		nl[0].n_name = "_filehead";
		nl[1].n_name = "_nfiles";
		nl[2].n_name = 0;
		if (kvm_nlist(kd, nl) != 0) {
			for (p = nl; p->n_type != 0; ++p)
				;
			_kvm_err(kd, kd->program,
				 "%s: no such symbol", p->n_name);
			return (0);
		}
		/* The count is at _nfiles (nl[1]), not _filehead. */
		if (KREAD(kd, nl[1].n_value, &nfiles)) {
			_kvm_err(kd, kd->program, "can't read nfiles");
			return (0);
		}
		size = sizeof(filehead) + (nfiles + 10) * sizeof(struct file);
		if (kd->argspc == 0)
			kd->argspc = (char *)_kvm_malloc(kd, size);
		else if (kd->arglen < size)
			kd->argspc = (char *)_kvm_realloc(kd, kd->argspc, size);
		if (kd->argspc == 0)
			return (0);
		kd->arglen = size;
		/* The list head is at _filehead (nl[0]). */
		nfiles = kvm_deadfiles(kd, op, arg, nl[0].n_value, nfiles);
		if (nfiles == 0)
			return (0);
	}
	*cnt = nfiles;
	return (kd->argspc);
}
/*
 * Return an array of kinfo_proc structures for the processes selected
 * by (op, arg): via sysctl(KERN_PROC) on a live host, or by walking the
 * allproc/zombproc lists of a dead kernel.  Stores the count in *cnt
 * and returns kd->procbase (owned by kd; freed on the next call or by
 * kvm_close()), or 0 on failure.
 */
struct kinfo_proc *
kvm_getprocs(kvm_t *kd, int op, int arg, int *cnt)
{
	int mib[4], st, nprocs;
	/* KERN_PROC_ALL takes no argument, so the mib is one level short. */
	int miblen = ((op & ~KERN_PROC_FLAGMASK) == KERN_PROC_ALL) ? 3 : 4;
	size_t size;

	if (kd->procbase != 0) {
		free((void *)kd->procbase);
		/*
		 * Clear this pointer in case this call fails.  Otherwise,
		 * kvm_close() will free it again.
		 */
		kd->procbase = 0;
	}
	if (kvm_ishost(kd)) {
		size = 0;
		mib[0] = CTL_KERN;
		mib[1] = KERN_PROC;
		mib[2] = op;
		mib[3] = arg;
		st = sysctl(mib, miblen, NULL, &size, NULL, 0);
		if (st == -1) {
			_kvm_syserr(kd, kd->program, "kvm_getprocs");
			return (0);
		}
		/*
		 * Overallocate slightly and retry while the process table
		 * keeps growing under us.
		 */
		do {
			size += size / 10;
			kd->procbase = (struct kinfo_proc *)
			    _kvm_realloc(kd, kd->procbase, size);
			if (kd->procbase == 0)
				return (0);
			st = sysctl(mib, miblen, kd->procbase, &size, NULL, 0);
		} while (st == -1 && errno == ENOMEM);
		if (st == -1) {
			_kvm_syserr(kd, kd->program, "kvm_getprocs");
			return (0);
		}
		if (size % sizeof(struct kinfo_proc) != 0) {
			_kvm_err(kd, kd->program,
			    "proc size mismatch (%zd total, %zd chunks)",
			    size, sizeof(struct kinfo_proc));
			return (0);
		}
		nprocs = size / sizeof(struct kinfo_proc);
	} else {
		struct nlist nl[4], *p;

		nl[0].n_name = "_nprocs";
		nl[1].n_name = "_allproc";
		nl[2].n_name = "_zombproc";
		nl[3].n_name = 0;
		if (kvm_nlist(kd, nl) != 0) {
			for (p = nl; p->n_type != 0; ++p)
				;
			_kvm_err(kd, kd->program,
			    "%s: no such symbol", p->n_name);
			return (0);
		}
		if (KREAD(kd, nl[0].n_value, &nprocs)) {
			_kvm_err(kd, kd->program, "can't read nprocs");
			return (0);
		}
		nprocs = kvm_deadprocs(kd, op, arg, nl[1].n_value,
		    nl[2].n_value);
#ifdef notdef
		size = nprocs * sizeof(struct kinfo_proc);
		(void)realloc(kd->procbase, size);
#endif
	}
	*cnt = nprocs;
	return (kd->procbase);
}
/*
 * Read proc's from memory file into buffer bp, which has space to hold
 * at most maxcnt procs.
 *
 * For each process, a local copy of the proc and of every structure it
 * points at (ucred, pgrp, session, tty, vmspace, ...) is read in and
 * the kernel pointers in the copy are patched to point at these local
 * copies before fill_kinfo_proc() is called.  When KERN_PROC_FLAG_LWP
 * is set, one kinfo entry is emitted per lwp; otherwise only the first
 * lwp (or just the proc, if it has none) is exported.
 *
 * Returns the number of entries produced, or -1 on any read error.
 */
static int
kvm_proclist(kvm_t *kd, int what, int arg, struct proc *p,
    struct kinfo_proc *bp)
{
	struct pgrp pgrp;
	struct pgrp tpgrp;
	struct globaldata gdata;
	struct session sess;
	struct session tsess;
	struct tty tty;
	struct proc proc;
	struct ucred ucred;
	struct thread thread;
	struct proc pproc;
	struct cdev cdev;
	struct vmspace vmspace;
	struct prison prison;
	struct sigacts sigacts;
	struct lwp lwp;
	uintptr_t lwppos;
	int count;
	char *wmesg;

	count = 0;
	/* Follow the list through the local copy of each proc. */
	for (; p != NULL; p = proc.p_list.le_next) {
		if (KREAD(kd, (u_long)p, &proc)) {
			_kvm_err(kd, kd->program, "can't read proc at %p", p);
			return (-1);
		}
		if (KREAD(kd, (u_long)proc.p_ucred, &ucred)) {
			_kvm_err(kd, kd->program, "can't read ucred at %p",
			    proc.p_ucred);
			return (-1);
		}
		proc.p_ucred = &ucred;

		/* Cheap filters that need only the proc/ucred copies. */
		switch(what & ~KERN_PROC_FLAGMASK) {
		case KERN_PROC_PID:
			if (proc.p_pid != (pid_t)arg)
				continue;
			break;
		case KERN_PROC_UID:
			if (ucred.cr_uid != (uid_t)arg)
				continue;
			break;
		case KERN_PROC_RUID:
			if (ucred.cr_ruid != (uid_t)arg)
				continue;
			break;
		}

		if (KREAD(kd, (u_long)proc.p_pgrp, &pgrp)) {
			_kvm_err(kd, kd->program, "can't read pgrp at %p",
			    proc.p_pgrp);
			return (-1);
		}
		proc.p_pgrp = &pgrp;
		if (proc.p_pptr) {
			if (KREAD(kd, (u_long)proc.p_pptr, &pproc)) {
				_kvm_err(kd, kd->program,
				    "can't read pproc at %p", proc.p_pptr);
				return (-1);
			}
			proc.p_pptr = &pproc;
		}
		if (proc.p_sigacts) {
			if (KREAD(kd, (u_long)proc.p_sigacts, &sigacts)) {
				_kvm_err(kd, kd->program,
				    "can't read sigacts at %p",
				    proc.p_sigacts);
				return (-1);
			}
			proc.p_sigacts = &sigacts;
		}
		if (KREAD(kd, (u_long)pgrp.pg_session, &sess)) {
			_kvm_err(kd, kd->program, "can't read session at %p",
			    pgrp.pg_session);
			return (-1);
		}
		pgrp.pg_session = &sess;

		/* Controlling terminal, its pgrp and its session. */
		if ((proc.p_flags & P_CONTROLT) && sess.s_ttyp != NULL) {
			if (KREAD(kd, (u_long)sess.s_ttyp, &tty)) {
				_kvm_err(kd, kd->program,
				    "can't read tty at %p", sess.s_ttyp);
				return (-1);
			}
			sess.s_ttyp = &tty;
			if (tty.t_dev != NULL) {
				/* An unreadable cdev is non-fatal. */
				if (KREAD(kd, (u_long)tty.t_dev, &cdev))
					tty.t_dev = NULL;
				else
					tty.t_dev = &cdev;
			}
			if (tty.t_pgrp != NULL) {
				if (KREAD(kd, (u_long)tty.t_pgrp, &tpgrp)) {
					_kvm_err(kd, kd->program,
					    "can't read tpgrp at %p",
					    tty.t_pgrp);
					return (-1);
				}
				tty.t_pgrp = &tpgrp;
			}
			if (tty.t_session != NULL) {
				if (KREAD(kd, (u_long)tty.t_session, &tsess)) {
					_kvm_err(kd, kd->program,
					    "can't read tsess at %p",
					    tty.t_session);
					return (-1);
				}
				tty.t_session = &tsess;
			}
		}
		if (KREAD(kd, (u_long)proc.p_vmspace, &vmspace)) {
			_kvm_err(kd, kd->program, "can't read vmspace at %p",
			    proc.p_vmspace);
			return (-1);
		}
		proc.p_vmspace = &vmspace;
		if (ucred.cr_prison != NULL) {
			if (KREAD(kd, (u_long)ucred.cr_prison, &prison)) {
				_kvm_err(kd, kd->program,
				    "can't read prison at %p",
				    ucred.cr_prison);
				return (-1);
			}
			ucred.cr_prison = &prison;
		}

		/* Filters that needed the pgrp/session/tty copies. */
		switch (what & ~KERN_PROC_FLAGMASK) {
		case KERN_PROC_PGRP:
			if (proc.p_pgrp->pg_id != (pid_t)arg)
				continue;
			break;
		case KERN_PROC_TTY:
			if ((proc.p_flags & P_CONTROLT) == 0 ||
			    dev2udev(proc.p_pgrp->pg_session->s_ttyp->t_dev)
					!= (dev_t)arg)
				continue;
			break;
		}

		if ((bp = kinfo_resize_proc(kd, bp)) == NULL)
			return (-1);
		fill_kinfo_proc(&proc, bp);
		bp->kp_paddr = (uintptr_t)p;

		/* Walk the lwp rb-tree hanging off this proc. */
		lwppos = kvm_firstlwp(kd, &lwp, &proc);
		if (lwppos == 0) {
			bp++;		/* Just export the proc then */
			count++;
		}
		while (lwppos && lwppos != (uintptr_t)-1) {
			if (p != lwp.lwp_proc) {
				_kvm_err(kd, kd->program,
				    "lwp has wrong parent");
				return (-1);
			}
			lwp.lwp_proc = &proc;
			if (KREAD(kd, (u_long)lwp.lwp_thread, &thread)) {
				_kvm_err(kd, kd->program,
				    "can't read thread at %p",
				    lwp.lwp_thread);
				return (-1);
			}
			lwp.lwp_thread = &thread;
			if (thread.td_gd) {
				if (KREAD(kd, (u_long)thread.td_gd, &gdata)) {
					_kvm_err(kd, kd->program, "can't read"
						 " gd at %p", thread.td_gd);
					return(-1);
				}
				thread.td_gd = &gdata;
			}
			if (thread.td_wmesg) {
				/* KREADSTR allocates; freed after copy-out. */
				wmesg = (void *)KREADSTR(kd, thread.td_wmesg);
				if (wmesg == NULL) {
					_kvm_err(kd, kd->program, "can't read"
						 " wmesg %p", thread.td_wmesg);
					return(-1);
				}
				thread.td_wmesg = wmesg;
			} else {
				wmesg = NULL;
			}
			if ((bp = kinfo_resize_proc(kd, bp)) == NULL)
				return (-1);
			fill_kinfo_proc(&proc, bp);
			fill_kinfo_lwp(&lwp, &bp->kp_lwp);
			bp->kp_paddr = (uintptr_t)p;
			bp++;
			count++;
			if (wmesg)
				free(wmesg);
			/* Without the LWP flag only the first lwp counts. */
			if ((what & KERN_PROC_FLAG_LWP) == 0)
				break;
			lwppos = kvm_nextlwp(kd, lwppos, &lwp, &proc);
		}
		if (lwppos == (uintptr_t)-1)
			return(-1);
	}
	return (count);
}
/*
 * Return an array of kinfo_proc structures for the processes selected
 * by (op, arg): via sysctl(KERN_PROC) on a live kernel, or by reading
 * nprocs/allproc/zombproc from a dead one.  Stores the count in *cnt
 * and returns kd->procbase (owned by kd), or NULL on failure.
 */
struct kinfo_proc *
kvm_getprocs(kvm_t *kd, int op, int arg, int *cnt)
{
	size_t size;
	int mib[4], st, nprocs;

	if (ISALIVE(kd)) {
		size = 0;
		mib[0] = CTL_KERN;
		mib[1] = KERN_PROC;
		mib[2] = op;
		mib[3] = arg;
		/* First call sizes the buffer, second fills it. */
		st = sysctl(mib, 4, NULL, &size, NULL, (size_t)0);
		if (st == -1) {
			_kvm_syserr(kd, kd->program, "kvm_getprocs");
			return (NULL);
		}
		KVM_ALLOC(kd, procbase, size);
		st = sysctl(mib, 4, kd->procbase, &size, NULL, (size_t)0);
		if (st == -1) {
			_kvm_syserr(kd, kd->program, "kvm_getprocs");
			return (NULL);
		}
		if (size % sizeof(struct kinfo_proc) != 0) {
			_kvm_err(kd, kd->program,
			    "proc size mismatch (%lu total, %lu chunks)",
			    (u_long)size, (u_long)sizeof(struct kinfo_proc));
			return (NULL);
		}
		nprocs = (int) (size / sizeof(struct kinfo_proc));
	} else {
		struct nlist nl[4], *p;

		(void)memset(nl, 0, sizeof(nl));
		nl[0].n_name = "_nprocs";
		nl[1].n_name = "_allproc";
		nl[2].n_name = "_zombproc";
		nl[3].n_name = NULL;

		if (kvm_nlist(kd, nl) != 0) {
			/* Advance to the terminator before reporting. */
			for (p = nl; p->n_type != 0; ++p)
				continue;
			_kvm_err(kd, kd->program,
			    "%s: no such symbol", p->n_name);
			return (NULL);
		}
		if (KREAD(kd, nl[0].n_value, &nprocs)) {
			_kvm_err(kd, kd->program, "can't read nprocs");
			return (NULL);
		}
		size = nprocs * sizeof(*kd->procbase);
		KVM_ALLOC(kd, procbase, size);
		nprocs = kvm_deadprocs(kd, op, arg, nl[1].n_value,
		    nl[2].n_value, nprocs);
		if (nprocs < 0)
			return (NULL);
#ifdef notdef
		size = nprocs * sizeof(struct kinfo_proc);
		(void)realloc(kd->procbase, size);
#endif
	}
	*cnt = nprocs;
	return (kd->procbase);
}
/*
 * Read proc's from memory file into buffer bp, which has space to hold
 * at most maxcnt procs.
 *
 * For each selected process a struct proc copy goes to bp->kp_proc and
 * a synthesized eproc (pgrp/session/tty/vmspace details) to
 * bp->kp_eproc.  Returns the number of entries produced, or -1 on a
 * fatal read error.
 *
 * BUG FIX: the tpgrp read-failure message used `&x' where the format
 * conversion `%x' was clearly intended, so the failing address was
 * never printed.
 */
static int
kvm_proclist(kvm_t *kd, int what, int arg, struct proc *p,
    struct kinfo_proc *bp, int maxcnt)
{
	struct session sess;
	struct eproc eproc;
	struct proc proc;
	struct process process;
	struct pgrp pgrp;
	struct tty tty;
	int cnt = 0;

	/* Follow the list via the local copy of each proc. */
	for (; cnt < maxcnt && p != NULL; p = LIST_NEXT(&proc, p_list)) {
		if (KREAD(kd, (u_long)p, &proc)) {
			_kvm_err(kd, kd->program, "can't read proc at %x", p);
			return (-1);
		}
		if (KREAD(kd, (u_long)proc.p_p, &process)) {
			_kvm_err(kd, kd->program,
			    "can't read process at %x", proc.p_p);
			return (-1);
		}
		/* Credentials are best-effort; a failed read is ignored. */
		if (KREAD(kd, (u_long)process.ps_cred, &eproc.e_pcred) == 0)
			KREAD(kd, (u_long)eproc.e_pcred.pc_ucred,
			    &eproc.e_ucred);

		switch (what) {
		case KERN_PROC_PID:
			if (proc.p_pid != (pid_t)arg)
				continue;
			break;
		case KERN_PROC_UID:
			if (eproc.e_ucred.cr_uid != (uid_t)arg)
				continue;
			break;
		case KERN_PROC_RUID:
			if (eproc.e_pcred.p_ruid != (uid_t)arg)
				continue;
			break;
		case KERN_PROC_ALL:
			if (proc.p_flag & P_SYSTEM)
				continue;
			break;
		}
		/*
		 * We're going to add another proc to the set.  If this
		 * will overflow the buffer, assume the reason is because
		 * nprocs (or the proc list) is corrupt and declare an error.
		 * (Defensive only: the loop condition already bounds cnt.)
		 */
		if (cnt >= maxcnt) {
			_kvm_err(kd, kd->program, "nprocs corrupt");
			return (-1);
		}
		/*
		 * gather eproc
		 */
		eproc.e_paddr = p;
		if (KREAD(kd, (u_long)proc.p_pgrp, &pgrp)) {
			_kvm_err(kd, kd->program, "can't read pgrp at %x",
			    proc.p_pgrp);
			return (-1);
		}
		eproc.e_sess = pgrp.pg_session;
		eproc.e_pgid = pgrp.pg_id;
		eproc.e_jobc = pgrp.pg_jobc;
		if (KREAD(kd, (u_long)pgrp.pg_session, &sess)) {
			_kvm_err(kd, kd->program, "can't read session at %x",
			    pgrp.pg_session);
			return (-1);
		}
		if ((proc.p_flag & P_CONTROLT) && sess.s_ttyp != NULL) {
			if (KREAD(kd, (u_long)sess.s_ttyp, &tty)) {
				_kvm_err(kd, kd->program,
				    "can't read tty at %x", sess.s_ttyp);
				return (-1);
			}
			eproc.e_tdev = tty.t_dev;
			eproc.e_tsess = tty.t_session;
			if (tty.t_pgrp != NULL) {
				if (KREAD(kd, (u_long)tty.t_pgrp, &pgrp)) {
					_kvm_err(kd, kd->program,
					    "can't read tpgrp at %x",
					    tty.t_pgrp);
					return (-1);
				}
				eproc.e_tpgid = pgrp.pg_id;
			} else
				eproc.e_tpgid = -1;
		} else
			eproc.e_tdev = NODEV;
		eproc.e_flag = sess.s_ttyvp ? EPROC_CTTY : 0;
		if (sess.s_leader == p)
			eproc.e_flag |= EPROC_SLEADER;
		if (proc.p_wmesg)
			(void)kvm_read(kd, (u_long)proc.p_wmesg,
			    eproc.e_wmesg, WMESGLEN);

		(void)kvm_read(kd, (u_long)proc.p_vmspace, &eproc.e_vm,
		    sizeof(eproc.e_vm));

		eproc.e_xsize = eproc.e_xrssize = 0;
		eproc.e_xccount = eproc.e_xswrss = 0;

		/* Filters that needed pgrp/tty information. */
		switch (what) {
		case KERN_PROC_PGRP:
			if (eproc.e_pgid != (pid_t)arg)
				continue;
			break;
		case KERN_PROC_TTY:
			if ((proc.p_flag & P_CONTROLT) == 0 ||
			    eproc.e_tdev != (dev_t)arg)
				continue;
			break;
		}
		bcopy(&proc, &bp->kp_proc, sizeof(proc));
		bcopy(&eproc, &bp->kp_eproc, sizeof(eproc));
		++bp;
		++cnt;
	}
	return (cnt);
}
/*
 * Translate user virtual address va of miniproc p into a pointer inside
 * kd->swapspc holding the page's contents (read either from the memory
 * image or from swap).  On success *cnt is set to the number of valid
 * bytes from that pointer to the end of the page.  Returns NULL on any
 * lookup or read failure.
 */
static char *
_kvm_ureadm(kvm_t *kd, const struct miniproc *p, u_long va, u_long *cnt)
{
	u_long addr, head;
	u_long offset;
	struct vm_map_entry vme;
	struct vm_amap amap;
	struct vm_anon *anonp, anon;
	struct vm_page pg;
	u_long slot;

	/* Lazily allocate the one-page bounce buffer. */
	if (kd->swapspc == NULL) {
		kd->swapspc = _kvm_malloc(kd, (size_t)kd->nbpg);
		if (kd->swapspc == NULL)
			return (NULL);
	}

	/*
	 * Look through the address map for the memory object
	 * that corresponds to the given virtual address.
	 * The header just has the entire valid range.
	 */
	head = (u_long)&p->p_vmspace->vm_map.header;
	addr = head;
	for (;;) {
		if (KREAD(kd, addr, &vme))
			return (NULL);
		if (va >= vme.start && va < vme.end &&
		    vme.aref.ar_amap != NULL)
			break;
		addr = (u_long)vme.next;
		if (addr == head)	/* wrapped: va not mapped */
			return (NULL);
	}

	/*
	 * we found the map entry, now to find the object...
	 */
	if (vme.aref.ar_amap == NULL)
		return (NULL);
	addr = (u_long)vme.aref.ar_amap;
	if (KREAD(kd, addr, &amap))
		return (NULL);

	offset = va - vme.start;
	slot = offset / kd->nbpg + vme.aref.ar_pageoff;
	/*
	 * sanity-check slot number
	 * NOTE(review): `slot > amap.am_nslot' lets slot == am_nslot
	 * through -- looks like an off-by-one; confirm against the
	 * amap layout before changing.
	 */
	if (slot > amap.am_nslot)
		return (NULL);

	/*
	 * NOTE(review): the anon index below uses offset/nbpg without
	 * ar_pageoff, unlike `slot' above -- verify which one the amap
	 * array is actually indexed by.
	 */
	addr = (u_long)amap.am_anon + (offset / kd->nbpg) * sizeof(anonp);
	if (KREAD(kd, addr, &anonp))
		return (NULL);

	addr = (u_long)anonp;
	if (KREAD(kd, addr, &anon))
		return (NULL);

	addr = (u_long)anon.an_page;
	if (addr) {
		/* Resident: read the page frame from the core image. */
		if (KREAD(kd, addr, &pg))
			return (NULL);
		if (_kvm_pread(kd, kd->pmfd, kd->swapspc, (size_t)kd->nbpg,
		    (off_t)pg.phys_addr) != kd->nbpg)
			return (NULL);
	} else {
		/* Swapped out: read the page from the swap device. */
		if (kd->swfd < 0 ||
		    _kvm_pread(kd, kd->swfd, kd->swapspc, (size_t)kd->nbpg,
		    (off_t)(anon.an_swslot * kd->nbpg)) != kd->nbpg)
			return (NULL);
	}

	/* Found the page. */
	offset %= kd->nbpg;
	*cnt = kd->nbpg - offset;
	return (&kd->swapspc[(size_t)offset]);
}
/*
 * kvm_getprocs: return an array of kinfo_proc entries selected by
 * (op, arg).  On a live kernel this is a sysctl(KERN_PROC) round trip;
 * on a dead kernel it reads nprocs and walks allproc/zombproc via
 * kvm_deadprocs().
 *
 * The returned array is owned by `kd' (freed on the next call or by
 * kvm_close()); the entry count is stored in *cnt.  Returns 0 on error.
 */
struct kinfo_proc *
kvm_getprocs(kvm_t *kd, int op, int arg, int *cnt)
{
	int mib[4], st, nprocs;
	size_t size;

	if (kd->procbase != 0) {
		free((void *)kd->procbase);
		/*
		 * Clear this pointer in case this call fails.  Otherwise,
		 * kvm_close() will free it again.
		 */
		kd->procbase = 0;
	}
	if (ISALIVE(kd)) {
		size = 0;
		mib[0] = CTL_KERN;
		mib[1] = KERN_PROC;
		mib[2] = op;
		mib[3] = arg;
		/* First sysctl probes the required buffer size only. */
		st = sysctl(mib, 4, NULL, &size, NULL, 0);
		if (st == -1) {
			_kvm_syserr(kd, kd->program, "kvm_getprocs");
			return (0);
		}
		kd->procbase = _kvm_malloc(kd, size);
		if (kd->procbase == 0)
			return (0);
		/* Second sysctl fetches the actual records. */
		st = sysctl(mib, 4, kd->procbase, &size, NULL, 0);
		if (st == -1) {
			_kvm_syserr(kd, kd->program, "kvm_getprocs");
			return (0);
		}
		if (size % sizeof(struct kinfo_proc) != 0) {
			/*
			 * %zu: both arguments are size_t; the old %d was
			 * undefined behavior on LP64 platforms.
			 */
			_kvm_err(kd, kd->program,
			    "proc size mismatch (%zu total, %zu chunks)",
			    size, sizeof(struct kinfo_proc));
			return (0);
		}
		nprocs = size / sizeof(struct kinfo_proc);
	} else {
		struct nlist nl[4], *p;

		memset(nl, 0, sizeof(nl));
		nl[0].n_name = "_nprocs";
		nl[1].n_name = "_allproc";
		nl[2].n_name = "_zombproc";
		nl[3].n_name = NULL;

		if (kvm_nlist(kd, nl) != 0) {
			/* Report the first symbol that failed to resolve. */
			for (p = nl; p->n_type != 0; ++p)
				;
			_kvm_err(kd, kd->program,
			    "%s: no such symbol", p->n_name);
			return (0);
		}
		if (KREAD(kd, nl[0].n_value, &nprocs)) {
			_kvm_err(kd, kd->program, "can't read nprocs");
			return (0);
		}
		size = nprocs * sizeof(struct kinfo_proc);
		kd->procbase = _kvm_malloc(kd, size);
		if (kd->procbase == 0)
			return (0);

		nprocs = kvm_deadprocs(kd, op, arg, nl[1].n_value,
		    nl[2].n_value, nprocs);
#ifdef notdef
		size = nprocs * sizeof(struct kinfo_proc);
		(void)realloc(kd->procbase, size);
#endif
	}
	*cnt = nprocs;
	return (kd->procbase);
}
/*
 * kvm_getproc2: return an array of kinfo_proc2 entries selected by
 * (op, arg).  On a live kernel this is a direct sysctl(KERN_PROC2);
 * on a dead kernel each old-style kinfo_proc from kvm_getprocs() is
 * converted into a kinfo_proc2, with the u-area read for rusage data.
 *
 * `esize' is the per-entry size copied out (must not exceed
 * sizeof(struct kinfo_proc2)); the result array is owned by `kd' and
 * the entry count is stored in *cnt.  Returns NULL on error.
 */
struct kinfo_proc2 *
kvm_getproc2(kvm_t *kd, int op, int arg, size_t esize, int *cnt)
{
	int mib[6], st, nprocs;
	struct user user;
	size_t size;

	if ((ssize_t)esize < 0)
		return (NULL);

	if (kd->procbase2 != NULL) {
		free(kd->procbase2);
		/*
		 * Clear this pointer in case this call fails.  Otherwise,
		 * kvm_close() will free it again.
		 */
		kd->procbase2 = 0;
	}

	if (ISALIVE(kd)) {
		size = 0;
		mib[0] = CTL_KERN;
		mib[1] = KERN_PROC2;
		mib[2] = op;
		mib[3] = arg;
		mib[4] = esize;
		mib[5] = 0;
		/* Probe for the required buffer size first. */
		st = sysctl(mib, 6, NULL, &size, NULL, 0);
		if (st == -1) {
			_kvm_syserr(kd, kd->program, "kvm_getproc2");
			return (NULL);
		}

		mib[5] = size / esize;
		kd->procbase2 = _kvm_malloc(kd, size);
		if (kd->procbase2 == 0)
			return (NULL);
		st = sysctl(mib, 6, kd->procbase2, &size, NULL, 0);
		if (st == -1) {
			_kvm_syserr(kd, kd->program, "kvm_getproc2");
			return (NULL);
		}
		nprocs = size / esize;
	} else {
		struct kinfo_proc2 kp2, *kp2p;
		struct kinfo_proc *kp;
		char *kp2c;
		int i;

		kp = kvm_getprocs(kd, op, arg, &nprocs);
		if (kp == NULL)
			return (NULL);

		kd->procbase2 = _kvm_malloc(kd, nprocs * esize);
		/* Fix: allocation result was previously used unchecked. */
		if (kd->procbase2 == NULL)
			return (NULL);
		kp2c = (char *)kd->procbase2;
		kp2p = &kp2;
		for (i = 0; i < nprocs; i++, kp++) {
			memset(kp2p, 0, sizeof(kp2));
			kp2p->p_paddr = PTRTOINT64(kp->kp_eproc.e_paddr);
			kp2p->p_addr = PTRTOINT64(kp->kp_proc.p_addr);
			kp2p->p_fd = PTRTOINT64(kp->kp_proc.p_fd);
			kp2p->p_stats = PTRTOINT64(kp->kp_proc.p_stats);
			kp2p->p_limit = PTRTOINT64(kp->kp_eproc.e_limit);
			kp2p->p_vmspace = PTRTOINT64(kp->kp_proc.p_vmspace);
			kp2p->p_sigacts = PTRTOINT64(kp->kp_proc.p_sigacts);
			kp2p->p_sess = PTRTOINT64(kp->kp_eproc.e_sess);
			kp2p->p_tsess = 0;
			kp2p->p_ru = PTRTOINT64(kp->kp_proc.p_ru);

			kp2p->p_eflag = 0;
			kp2p->p_exitsig = kp->kp_proc.p_exitsig;
			kp2p->p_flag = kp->kp_proc.p_flag;

			kp2p->p_pid = kp->kp_proc.p_pid;
			kp2p->p_ppid = kp->kp_eproc.e_ppid;
#if 0
			kp2p->p_sid = kp->kp_eproc.e_sid;
#else
			kp2p->p_sid = -1; /* XXX */
#endif
			kp2p->p__pgid = kp->kp_eproc.e_pgid;
			kp2p->p_tpgid = -1;

			kp2p->p_uid = kp->kp_eproc.e_ucred.cr_uid;
			kp2p->p_ruid = kp->kp_eproc.e_pcred.p_ruid;
			kp2p->p_gid = kp->kp_eproc.e_ucred.cr_gid;
			kp2p->p_rgid = kp->kp_eproc.e_pcred.p_rgid;

			memcpy(kp2p->p_groups, kp->kp_eproc.e_ucred.cr_groups,
			    MIN(sizeof(kp2p->p_groups),
			    sizeof(kp->kp_eproc.e_ucred.cr_groups)));
			kp2p->p_ngroups = kp->kp_eproc.e_ucred.cr_ngroups;

			kp2p->p_jobc = kp->kp_eproc.e_jobc;
			kp2p->p_tdev = kp->kp_eproc.e_tdev;
			kp2p->p_tpgid = kp->kp_eproc.e_tpgid;
			kp2p->p_tsess = PTRTOINT64(kp->kp_eproc.e_tsess);

			kp2p->p_estcpu = kp->kp_proc.p_estcpu;
			/* XXX(review): rtime derived from p_estcpu looks like
			 * a copy/paste slip — confirm intended source field. */
			kp2p->p_rtime_sec = kp->kp_proc.p_estcpu;
			kp2p->p_rtime_usec = kp->kp_proc.p_estcpu;
			kp2p->p_cpticks = kp->kp_proc.p_cpticks;
			kp2p->p_pctcpu = kp->kp_proc.p_pctcpu;
			kp2p->p_swtime = kp->kp_proc.p_swtime;
			kp2p->p_slptime = kp->kp_proc.p_slptime;
			kp2p->p_schedflags = 0;

			kp2p->p_uticks = kp->kp_proc.p_uticks;
			kp2p->p_sticks = kp->kp_proc.p_sticks;
			kp2p->p_iticks = kp->kp_proc.p_iticks;

			kp2p->p_tracep = PTRTOINT64(kp->kp_proc.p_tracep);
			kp2p->p_traceflag = kp->kp_proc.p_traceflag;

			kp2p->p_holdcnt = 1;

			kp2p->p_siglist = kp->kp_proc.p_siglist;
			kp2p->p_sigmask = kp->kp_proc.p_sigmask;
			kp2p->p_sigignore = kp->kp_proc.p_sigignore;
			kp2p->p_sigcatch = kp->kp_proc.p_sigcatch;

			kp2p->p_stat = kp->kp_proc.p_stat;
			kp2p->p_priority = kp->kp_proc.p_priority;
			kp2p->p_usrpri = kp->kp_proc.p_usrpri;
			kp2p->p_nice = kp->kp_proc.p_nice;

			kp2p->p_xstat = kp->kp_proc.p_xstat;
			kp2p->p_acflag = kp->kp_proc.p_acflag;

			strncpy(kp2p->p_comm, kp->kp_proc.p_comm,
			    MIN(sizeof(kp2p->p_comm),
			    sizeof(kp->kp_proc.p_comm)));

			strncpy(kp2p->p_wmesg, kp->kp_eproc.e_wmesg,
			    sizeof(kp2p->p_wmesg));
			kp2p->p_wchan = PTRTOINT64(kp->kp_proc.p_wchan);

			strncpy(kp2p->p_login, kp->kp_eproc.e_login,
			    sizeof(kp2p->p_login));

			kp2p->p_vm_rssize = kp->kp_eproc.e_xrssize;
			kp2p->p_vm_tsize = kp->kp_eproc.e_vm.vm_tsize;
			kp2p->p_vm_dsize = kp->kp_eproc.e_vm.vm_dsize;
			kp2p->p_vm_ssize = kp->kp_eproc.e_vm.vm_ssize;

			kp2p->p_eflag = kp->kp_eproc.e_flag;

			/* u-area data is only valid for live, resident procs. */
			if (P_ZOMBIE(&kp->kp_proc) ||
			    kp->kp_proc.p_addr == NULL ||
			    KREAD(kd, (u_long)kp->kp_proc.p_addr, &user)) {
				kp2p->p_uvalid = 0;
			} else {
				kp2p->p_uvalid = 1;
				kp2p->p_ustart_sec = user.u_stats.p_start.tv_sec;
				kp2p->p_ustart_usec = user.u_stats.p_start.tv_usec;
				kp2p->p_uutime_sec = user.u_stats.p_ru.ru_utime.tv_sec;
				kp2p->p_uutime_usec = user.u_stats.p_ru.ru_utime.tv_usec;
				kp2p->p_ustime_sec = user.u_stats.p_ru.ru_stime.tv_sec;
				kp2p->p_ustime_usec = user.u_stats.p_ru.ru_stime.tv_usec;
				kp2p->p_uru_maxrss = user.u_stats.p_ru.ru_maxrss;
				kp2p->p_uru_ixrss = user.u_stats.p_ru.ru_ixrss;
				kp2p->p_uru_idrss = user.u_stats.p_ru.ru_idrss;
				kp2p->p_uru_isrss = user.u_stats.p_ru.ru_isrss;
				kp2p->p_uru_minflt = user.u_stats.p_ru.ru_minflt;
				kp2p->p_uru_majflt = user.u_stats.p_ru.ru_majflt;
				kp2p->p_uru_nswap = user.u_stats.p_ru.ru_nswap;
				kp2p->p_uru_inblock = user.u_stats.p_ru.ru_inblock;
				kp2p->p_uru_oublock = user.u_stats.p_ru.ru_oublock;
				kp2p->p_uru_msgsnd = user.u_stats.p_ru.ru_msgsnd;
				kp2p->p_uru_msgrcv = user.u_stats.p_ru.ru_msgrcv;
				kp2p->p_uru_nsignals = user.u_stats.p_ru.ru_nsignals;
				kp2p->p_uru_nvcsw = user.u_stats.p_ru.ru_nvcsw;
				kp2p->p_uru_nivcsw = user.u_stats.p_ru.ru_nivcsw;
				kp2p->p_uctime_sec =
				    user.u_stats.p_cru.ru_utime.tv_sec +
				    user.u_stats.p_cru.ru_stime.tv_sec;
				kp2p->p_uctime_usec =
				    user.u_stats.p_cru.ru_utime.tv_usec +
				    user.u_stats.p_cru.ru_stime.tv_usec;
			}
			memcpy(kp2c, &kp2, esize);
			kp2c += esize;
		}

		free(kd->procbase);
		/*
		 * Fix: clear the freed pointer so kvm_close() (or the next
		 * kvm_getprocs() call) does not free it a second time.
		 */
		kd->procbase = NULL;
	}
	*cnt = nprocs;
	return (kd->procbase2);
}
/*
 * read_ns: walk the kernel's TCP and/or UDP inpcb tables through the
 * kvm handle `kd' and enter() each connection of interest into the
 * display list.  Updates the globals num_ns and num_disp as a side
 * effect; always returns 0.
 */
int
read_ns(void)
{
	struct inpcbtable pcbtable;
	struct inpcb *head, *prev, *next;
	struct inpcb inpcb;
	struct socket sockb;
	struct tcpcb tcpcb;
	void *off;
	int istcp;

	/* No kvm handle: nothing to read. */
	if (kd == NULL) {
		return (0);
	}

	num_ns = 0;

	if (namelist[X_TCBTABLE].n_value == 0)
		return 0;

	/* Choose the first table to scan; TCP first when both requested. */
	if (protos & TCP) {
		off = NPTR(X_TCBTABLE);
		istcp = 1;
	} else if (protos & UDP) {
		off = NPTR(X_UDBTABLE);
		istcp = 0;
	} else {
		error("No protocols to display");
		return 0;
	}

again:
	KREAD(off, &pcbtable, sizeof (struct inpcbtable));

	/*
	 * The queue head lives inside the kernel's inpcbtable, so compute
	 * its kernel address from `off': the termination test below must
	 * compare kernel pointers against kernel pointers.
	 */
	prev = head = (struct inpcb *)&((struct inpcbtable *)off)->inpt_queue;
	next = CIRCLEQ_FIRST(&pcbtable.inpt_queue);

	while (next != head) {
		KREAD(next, &inpcb, sizeof (inpcb));
		/* Back-pointer mismatch: the list changed under us. */
		if (CIRCLEQ_PREV(&inpcb, inp_queue) != prev) {
			error("Kernel state in transition");
			return 0;
		}
		prev = next;
		next = CIRCLEQ_NEXT(&inpcb, inp_queue);

		/* Unless `aflag' is set, skip PCBs with no foreign address. */
		if (!aflag) {
			if (!(inpcb.inp_flags & INP_IPV6) &&
			    inet_lnaof(inpcb.inp_faddr) == INADDR_ANY)
				continue;
			if ((inpcb.inp_flags & INP_IPV6) &&
			    IN6_IS_ADDR_UNSPECIFIED(&inpcb.inp_faddr6))
				continue;
		}
		KREAD(inpcb.inp_socket, &sockb, sizeof (sockb));
		if (istcp) {
			KREAD(inpcb.inp_ppcb, &tcpcb, sizeof (tcpcb));
			/* Unless `aflag', hide listening/embryonic states. */
			if (!aflag && tcpcb.t_state <= TCPS_LISTEN)
				continue;
			enter(&inpcb, &sockb, tcpcb.t_state, "tcp");
		} else
			enter(&inpcb, &sockb, 0, "udp");
	}
	/* TCP pass done; scan the UDP table too if it was requested. */
	if (istcp && (protos & UDP)) {
		istcp = 0;
		off = NPTR(X_UDBTABLE);
		goto again;
	}
	num_disp = num_ns;
	return 0;
}
/*
 * Read proc's from memory file into buffer bp, which has space to hold
 * at most maxcnt procs.
 *
 * `what'/`arg' select which processes to keep (KERN_PROC_PID, _UID,
 * _RUID, _PGRP, _TTY).  Returns the number of entries stored, or -1
 * with an error set on kd.
 */
static int
kvm_proclist(kvm_t *kd, int what, int arg, struct proc *p,
    struct kinfo_proc *bp, int maxcnt)
{
	int cnt = 0;
	int nlwps;
	struct kinfo_lwp *kl;
	struct eproc eproc;
	struct pgrp pgrp;
	struct session sess;
	struct tty tty;
	struct proc proc;

	for (; cnt < maxcnt && p != NULL; p = proc.p_list.le_next) {
		if (KREAD(kd, (u_long)p, &proc)) {
			_kvm_err(kd, kd->program, "can't read proc at %p", p);
			return (-1);
		}
		if (_kvm_convertcred(kd, (u_long)proc.p_cred, &eproc) != 0) {
			_kvm_err(kd, kd->program,
			    "can't read proc credentials at %p", p);
			return (-1);
		}

		/* First filter pass: criteria that need no further reads. */
		switch (what) {
		case KERN_PROC_PID:
			if (proc.p_pid != (pid_t)arg)
				continue;
			break;

		case KERN_PROC_UID:
			if (eproc.e_ucred.cr_uid != (uid_t)arg)
				continue;
			break;

		case KERN_PROC_RUID:
			if (eproc.e_pcred.p_ruid != (uid_t)arg)
				continue;
			break;
		}
		/*
		 * We're going to add another proc to the set.  If this
		 * will overflow the buffer, assume the reason is because
		 * nprocs (or the proc list) is corrupt and declare an error.
		 */
		if (cnt >= maxcnt) {
			_kvm_err(kd, kd->program, "nprocs corrupt");
			return (-1);
		}
		/*
		 * gather eproc
		 */
		eproc.e_paddr = p;
		if (KREAD(kd, (u_long)proc.p_pgrp, &pgrp)) {
			_kvm_err(kd, kd->program, "can't read pgrp at %p",
			    proc.p_pgrp);
			return (-1);
		}
		eproc.e_sess = pgrp.pg_session;
		eproc.e_pgid = pgrp.pg_id;
		eproc.e_jobc = pgrp.pg_jobc;
		if (KREAD(kd, (u_long)pgrp.pg_session, &sess)) {
			_kvm_err(kd, kd->program, "can't read session at %p",
			    pgrp.pg_session);
			return (-1);
		}
		if ((proc.p_lflag & PL_CONTROLT) && sess.s_ttyp != NULL) {
			if (KREAD(kd, (u_long)sess.s_ttyp, &tty)) {
				_kvm_err(kd, kd->program,
				    "can't read tty at %p", sess.s_ttyp);
				return (-1);
			}
			eproc.e_tdev = (uint32_t)tty.t_dev;
			eproc.e_tsess = tty.t_session;
			if (tty.t_pgrp != NULL) {
				if (KREAD(kd, (u_long)tty.t_pgrp, &pgrp)) {
					_kvm_err(kd, kd->program,
					    "can't read tpgrp at %p",
					    tty.t_pgrp);
					return (-1);
				}
				eproc.e_tpgid = pgrp.pg_id;
			} else
				eproc.e_tpgid = -1;
		} else
			eproc.e_tdev = (uint32_t)NODEV;
		eproc.e_flag = sess.s_ttyvp ? EPROC_CTTY : 0;
		eproc.e_sid = sess.s_sid;
		if (sess.s_leader == p)
			eproc.e_flag |= EPROC_SLEADER;
		/*
		 * Fill in the old-style proc.p_wmesg by copying the wmesg
		 * from the first available LWP.
		 */
		eproc.e_wmesg[0] = '\0';	/* fix: defined even if the
						 * LWP lookup fails below */
		kl = kvm_getlwps(kd, proc.p_pid,
		    (u_long)PTRTOUINT64(eproc.e_paddr),
		    sizeof(struct kinfo_lwp), &nlwps);
		if (kl != NULL && nlwps > 0) {
			/*
			 * Fix: bounded copy instead of strcpy(); the source
			 * string comes from kernel memory and could overrun
			 * e_wmesg if the dump is corrupt.
			 */
			strncpy(eproc.e_wmesg, kl[0].l_wmesg,
			    sizeof(eproc.e_wmesg) - 1);
			eproc.e_wmesg[sizeof(eproc.e_wmesg) - 1] = '\0';
		}
		(void)kvm_read(kd, (u_long)proc.p_vmspace, &eproc.e_vm,
		    sizeof(eproc.e_vm));

		eproc.e_xsize = eproc.e_xrssize = 0;
		eproc.e_xccount = eproc.e_xswrss = 0;

		/* Second filter pass: criteria needing pgrp/tty data. */
		switch (what) {
		case KERN_PROC_PGRP:
			if (eproc.e_pgid != (pid_t)arg)
				continue;
			break;

		case KERN_PROC_TTY:
			if ((proc.p_lflag & PL_CONTROLT) == 0 ||
			    eproc.e_tdev != (dev_t)arg)
				continue;
			break;
		}
		memcpy(&bp->kp_proc, &proc, sizeof(proc));
		memcpy(&bp->kp_eproc, &eproc, sizeof(eproc));
		++bp;
		++cnt;
	}
	return (cnt);
}
int main(int argc, char *argv[]) { struct msgbuf *bufp, cur; char *bp, *ep, *memf, *nextp, *nlistf, *p, *q, *visbp; kvm_t *kd; size_t buflen, bufpos; long pri; int ch, clear; bool all; all = false; clear = false; (void) setlocale(LC_CTYPE, ""); memf = nlistf = NULL; while ((ch = getopt(argc, argv, "acM:N:")) != -1) switch(ch) { case 'a': all = true; break; case 'c': clear = true; break; case 'M': memf = optarg; break; case 'N': nlistf = optarg; break; case '?': default: usage(); } argc -= optind; if (argc != 0) usage(); if (memf == NULL) { /* * Running kernel. Use sysctl. This gives an unwrapped * buffer as a side effect. */ if (sysctlbyname("kern.msgbuf", NULL, &buflen, NULL, 0) == -1) err(1, "sysctl kern.msgbuf"); if ((bp = malloc(buflen + 2)) == NULL) errx(1, "malloc failed"); if (sysctlbyname("kern.msgbuf", bp, &buflen, NULL, 0) == -1) err(1, "sysctl kern.msgbuf"); if (clear) if (sysctlbyname("kern.msgbuf_clear", NULL, NULL, &clear, sizeof(int))) err(1, "sysctl kern.msgbuf_clear"); } else { /* Read in kernel message buffer and do sanity checks. */ kd = kvm_open(nlistf, memf, NULL, O_RDONLY, "dmesg"); if (kd == NULL) exit (1); if (kvm_nlist(kd, nl) == -1) errx(1, "kvm_nlist: %s", kvm_geterr(kd)); if (nl[X_MSGBUF].n_type == 0) errx(1, "%s: msgbufp not found", nlistf ? nlistf : "namelist"); if (KREAD(nl[X_MSGBUF].n_value, bufp) || KREAD((long)bufp, cur)) errx(1, "kvm_read: %s", kvm_geterr(kd)); if (cur.msg_magic != MSG_MAGIC) errx(1, "kernel message buffer has different magic " "number"); if ((bp = malloc(cur.msg_size + 2)) == NULL) errx(1, "malloc failed"); /* Unwrap the circular buffer to start from the oldest data. 
*/ bufpos = MSGBUF_SEQ_TO_POS(&cur, cur.msg_wseq); if (kvm_read(kd, (long)&cur.msg_ptr[bufpos], bp, cur.msg_size - bufpos) != (ssize_t)(cur.msg_size - bufpos)) errx(1, "kvm_read: %s", kvm_geterr(kd)); if (bufpos != 0 && kvm_read(kd, (long)cur.msg_ptr, &bp[cur.msg_size - bufpos], bufpos) != (ssize_t)bufpos) errx(1, "kvm_read: %s", kvm_geterr(kd)); kvm_close(kd); buflen = cur.msg_size; } /* * Ensure that the buffer ends with a newline and a \0 to avoid * complications below. We left space above. */ if (buflen == 0 || bp[buflen - 1] != '\n') bp[buflen++] = '\n'; bp[buflen] = '\0'; if ((visbp = malloc(4 * buflen + 1)) == NULL) errx(1, "malloc failed"); /* * The message buffer is circular, but has been unwrapped so that * the oldest data comes first. The data will be preceded by \0's * if the message buffer was not full. */ p = bp; ep = &bp[buflen]; if (*p == '\0') { /* Strip leading \0's */ while (*p == '\0') p++; } else if (!all) { /* Skip the first line, since it is probably incomplete. */ p = memchr(p, '\n', ep - p); p++; } for (; p < ep; p = nextp) { nextp = memchr(p, '\n', ep - p); nextp++; /* Skip ^<[0-9]+> syslog sequences. */ if (*p == '<' && isdigit(*(p+1))) { errno = 0; pri = strtol(p + 1, &q, 10); if (*q == '>' && pri >= 0 && pri < INT_MAX && errno == 0) { if (LOG_FAC(pri) != LOG_KERN && !all) continue; p = q + 1; } } (void)strvisx(visbp, p, nextp - p, 0); (void)printf("%s", visbp); } exit(0); }
/*
 * kvm_getproc2: return an array of kinfo_proc2 entries selected by
 * (op, arg).  Against a live kernel this is sysctl(KERN_PROC2), retried
 * if the process table grows between the size probe and the fetch; on a
 * dead kernel each old-style kinfo_proc from kvm_getprocs() is converted,
 * using its first LWP as the per-process representative.
 *
 * `esize' is the per-entry size copied out; the result array is owned
 * by `kd' and the entry count is stored in *cnt.  Returns NULL on error.
 */
struct kinfo_proc2 *
kvm_getproc2(kvm_t *kd, int op, int arg, size_t esize, int *cnt)
{
	size_t size;
	int mib[6], st, nprocs;
	struct pstats pstats;

	if (ISSYSCTL(kd)) {
		size = 0;
		mib[0] = CTL_KERN;
		mib[1] = KERN_PROC2;
		mib[2] = op;
		mib[3] = arg;
		mib[4] = (int)esize;
again:
		mib[5] = 0;
		/* Probe for the required buffer size. */
		st = sysctl(mib, 6, NULL, &size, NULL, (size_t)0);
		if (st == -1) {
			_kvm_syserr(kd, kd->program, "kvm_getproc2");
			return (NULL);
		}
		mib[5] = (int) (size / esize);
		KVM_ALLOC(kd, procbase2, size);
		st = sysctl(mib, 6, kd->procbase2, &size, NULL, (size_t)0);
		if (st == -1) {
			/* Table grew since the probe: re-probe and retry. */
			if (errno == ENOMEM) {
				goto again;
			}
			_kvm_syserr(kd, kd->program, "kvm_getproc2");
			return (NULL);
		}
		nprocs = (int) (size / esize);
	} else {
		char *kp2c;
		struct kinfo_proc *kp;
		struct kinfo_proc2 kp2, *kp2p;
		struct kinfo_lwp *kl;
		int i, nlwps;

		kp = kvm_getprocs(kd, op, arg, &nprocs);
		if (kp == NULL)
			return (NULL);

		size = nprocs * esize;
		KVM_ALLOC(kd, procbase2, size);
		kp2c = (char *)(void *)kd->procbase2;
		kp2p = &kp2;
		for (i = 0; i < nprocs; i++, kp++) {
			struct timeval tv;

			kl = kvm_getlwps(kd, kp->kp_proc.p_pid,
			    (u_long)PTRTOUINT64(kp->kp_eproc.e_paddr),
			    sizeof(struct kinfo_lwp), &nlwps);

			if (kl == NULL) {
				_kvm_syserr(kd, NULL,
				    "kvm_getlwps() failed on process %u\n",
				    kp->kp_proc.p_pid);
				/* nlwps == 0 means a hard failure; otherwise
				 * just skip this process. */
				if (nlwps == 0)
					return NULL;
				else
					continue;
			}

			/* We use kl[0] as the "representative" LWP */
			memset(kp2p, 0, sizeof(kp2));
			kp2p->p_forw = kl[0].l_forw;
			kp2p->p_back = kl[0].l_back;
			kp2p->p_paddr = PTRTOUINT64(kp->kp_eproc.e_paddr);
			kp2p->p_addr = kl[0].l_addr;
			kp2p->p_fd = PTRTOUINT64(kp->kp_proc.p_fd);
			kp2p->p_cwdi = PTRTOUINT64(kp->kp_proc.p_cwdi);
			kp2p->p_stats = PTRTOUINT64(kp->kp_proc.p_stats);
			kp2p->p_limit = PTRTOUINT64(kp->kp_proc.p_limit);
			kp2p->p_vmspace = PTRTOUINT64(kp->kp_proc.p_vmspace);
			kp2p->p_sigacts = PTRTOUINT64(kp->kp_proc.p_sigacts);
			kp2p->p_sess = PTRTOUINT64(kp->kp_eproc.e_sess);
			kp2p->p_tsess = 0;
#if 1 /* XXX: dsl - p_ru was only ever non-zero for zombies */
			kp2p->p_ru = 0;
#else
			kp2p->p_ru = PTRTOUINT64(pstats.p_ru);
#endif

			kp2p->p_eflag = 0;
			kp2p->p_exitsig = kp->kp_proc.p_exitsig;
			kp2p->p_flag = kp->kp_proc.p_flag;

			kp2p->p_pid = kp->kp_proc.p_pid;
			kp2p->p_ppid = kp->kp_eproc.e_ppid;
			kp2p->p_sid = kp->kp_eproc.e_sid;
			kp2p->p__pgid = kp->kp_eproc.e_pgid;
			/* Overwritten with e_tpgid below. */
			kp2p->p_tpgid = -1 /* XXX NO_PGID! */;

			kp2p->p_uid = kp->kp_eproc.e_ucred.cr_uid;
			kp2p->p_ruid = kp->kp_eproc.e_pcred.p_ruid;
			kp2p->p_svuid = kp->kp_eproc.e_pcred.p_svuid;
			kp2p->p_gid = kp->kp_eproc.e_ucred.cr_gid;
			kp2p->p_rgid = kp->kp_eproc.e_pcred.p_rgid;
			kp2p->p_svgid = kp->kp_eproc.e_pcred.p_svgid;

			/*CONSTCOND*/
			memcpy(kp2p->p_groups, kp->kp_eproc.e_ucred.cr_groups,
			    MIN(sizeof(kp2p->p_groups),
			    sizeof(kp->kp_eproc.e_ucred.cr_groups)));
			kp2p->p_ngroups = kp->kp_eproc.e_ucred.cr_ngroups;

			kp2p->p_jobc = kp->kp_eproc.e_jobc;
			kp2p->p_tdev = kp->kp_eproc.e_tdev;
			kp2p->p_tpgid = kp->kp_eproc.e_tpgid;
			kp2p->p_tsess = PTRTOUINT64(kp->kp_eproc.e_tsess);

			kp2p->p_estcpu = 0;
			bintime2timeval(&kp->kp_proc.p_rtime, &tv);
			kp2p->p_rtime_sec = (uint32_t)tv.tv_sec;
			kp2p->p_rtime_usec = (uint32_t)tv.tv_usec;
			kp2p->p_cpticks = kl[0].l_cpticks;
			kp2p->p_pctcpu = kp->kp_proc.p_pctcpu;
			kp2p->p_swtime = kl[0].l_swtime;
			kp2p->p_slptime = kl[0].l_slptime;
#if 0 /* XXX thorpej */
			kp2p->p_schedflags = kp->kp_proc.p_schedflags;
#else
			kp2p->p_schedflags = 0;
#endif
			kp2p->p_uticks = kp->kp_proc.p_uticks;
			kp2p->p_sticks = kp->kp_proc.p_sticks;
			kp2p->p_iticks = kp->kp_proc.p_iticks;

			kp2p->p_tracep = PTRTOUINT64(kp->kp_proc.p_tracep);
			kp2p->p_traceflag = kp->kp_proc.p_traceflag;

			kp2p->p_holdcnt = kl[0].l_holdcnt;

			memcpy(&kp2p->p_siglist,
			    &kp->kp_proc.p_sigpend.sp_set,
			    sizeof(ki_sigset_t));
			memset(&kp2p->p_sigmask, 0, sizeof(ki_sigset_t));
			memcpy(&kp2p->p_sigignore,
			    &kp->kp_proc.p_sigctx.ps_sigignore,
			    sizeof(ki_sigset_t));
			memcpy(&kp2p->p_sigcatch,
			    &kp->kp_proc.p_sigctx.ps_sigcatch,
			    sizeof(ki_sigset_t));

			kp2p->p_stat = kl[0].l_stat;
			kp2p->p_priority = kl[0].l_priority;
			kp2p->p_usrpri = kl[0].l_priority;
			kp2p->p_nice = kp->kp_proc.p_nice;

			kp2p->p_xstat = kp->kp_proc.p_xstat;
			kp2p->p_acflag = kp->kp_proc.p_acflag;

			/*CONSTCOND*/
			strncpy(kp2p->p_comm, kp->kp_proc.p_comm,
			    MIN(sizeof(kp2p->p_comm),
			    sizeof(kp->kp_proc.p_comm)));

			strncpy(kp2p->p_wmesg, kp->kp_eproc.e_wmesg,
			    sizeof(kp2p->p_wmesg));
			kp2p->p_wchan = kl[0].l_wchan;
			strncpy(kp2p->p_login, kp->kp_eproc.e_login,
			    sizeof(kp2p->p_login));

			kp2p->p_vm_rssize = kp->kp_eproc.e_xrssize;
			kp2p->p_vm_tsize = kp->kp_eproc.e_vm.vm_tsize;
			kp2p->p_vm_dsize = kp->kp_eproc.e_vm.vm_dsize;
			kp2p->p_vm_ssize = kp->kp_eproc.e_vm.vm_ssize;
			kp2p->p_vm_vsize = kp->kp_eproc.e_vm.vm_map.size
			    / kd->nbpg;
			/* Adjust mapped size */
			kp2p->p_vm_msize =
			    (kp->kp_eproc.e_vm.vm_map.size / kd->nbpg) -
			    kp->kp_eproc.e_vm.vm_issize +
			    kp->kp_eproc.e_vm.vm_ssize;

			kp2p->p_eflag = (int32_t)kp->kp_eproc.e_flag;

			kp2p->p_realflag = kp->kp_proc.p_flag;
			kp2p->p_nlwps = kp->kp_proc.p_nlwps;
			kp2p->p_nrlwps = kp->kp_proc.p_nrlwps;
			kp2p->p_realstat = kp->kp_proc.p_stat;

			/* pstats (rusage etc.) is only readable for live,
			 * non-zombie processes with a valid p_stats. */
			if (P_ZOMBIE(&kp->kp_proc) ||
			    kp->kp_proc.p_stats == NULL ||
			    KREAD(kd, (u_long)kp->kp_proc.p_stats, &pstats)) {
				kp2p->p_uvalid = 0;
			} else {
				kp2p->p_uvalid = 1;

				kp2p->p_ustart_sec = (u_int32_t)
				    pstats.p_start.tv_sec;
				kp2p->p_ustart_usec = (u_int32_t)
				    pstats.p_start.tv_usec;

				kp2p->p_uutime_sec = (u_int32_t)
				    pstats.p_ru.ru_utime.tv_sec;
				kp2p->p_uutime_usec = (u_int32_t)
				    pstats.p_ru.ru_utime.tv_usec;
				kp2p->p_ustime_sec = (u_int32_t)
				    pstats.p_ru.ru_stime.tv_sec;
				kp2p->p_ustime_usec = (u_int32_t)
				    pstats.p_ru.ru_stime.tv_usec;

				kp2p->p_uru_maxrss = pstats.p_ru.ru_maxrss;
				kp2p->p_uru_ixrss = pstats.p_ru.ru_ixrss;
				kp2p->p_uru_idrss = pstats.p_ru.ru_idrss;
				kp2p->p_uru_isrss = pstats.p_ru.ru_isrss;
				kp2p->p_uru_minflt = pstats.p_ru.ru_minflt;
				kp2p->p_uru_majflt = pstats.p_ru.ru_majflt;
				kp2p->p_uru_nswap = pstats.p_ru.ru_nswap;
				kp2p->p_uru_inblock = pstats.p_ru.ru_inblock;
				kp2p->p_uru_oublock = pstats.p_ru.ru_oublock;
				kp2p->p_uru_msgsnd = pstats.p_ru.ru_msgsnd;
				kp2p->p_uru_msgrcv = pstats.p_ru.ru_msgrcv;
				kp2p->p_uru_nsignals = pstats.p_ru.ru_nsignals;
				kp2p->p_uru_nvcsw = pstats.p_ru.ru_nvcsw;
				kp2p->p_uru_nivcsw = pstats.p_ru.ru_nivcsw;

				kp2p->p_uctime_sec = (u_int32_t)
				    (pstats.p_cru.ru_utime.tv_sec +
				    pstats.p_cru.ru_stime.tv_sec);
				kp2p->p_uctime_usec = (u_int32_t)
				    (pstats.p_cru.ru_utime.tv_usec +
				    pstats.p_cru.ru_stime.tv_usec);
			}

			memcpy(kp2c, &kp2, esize);
			kp2c += esize;
		}
	}
	*cnt = nprocs;
	return (kd->procbase2);
}