Example No. 1
static int
_kvm_read_phys(kvm_t *kd, off_t pos, void *buf, size_t size)
{

    /* XXX This has to be a raw file read, kvm_read is virtual. */
    if (lseek(kd->pmfd, pos, SEEK_SET) == -1) {
        _kvm_syserr(kd, kd->program, "_kvm_read_phys: lseek");
        return (0);
    }
    if (read(kd->pmfd, buf, size) != (ssize_t)size) {
        _kvm_syserr(kd, kd->program, "_kvm_read_phys: read");
        return (0);
    }
    return (1);
}
Example No. 2
int
_kvm_kvatop(kvm_t *kd, u_long va, off_t *pa)
{
	struct vmstate *vm = kd->vmst;
	pd_entry_t pd;
	pt_entry_t pte;
	u_long pte_pa;
	off_t pte_off;

	if (kd->vmst->minidump)
		return (_kvm_minidump_kvatop(kd, va, pa));

	if (vm->l1pt == NULL)
		return (_kvm_pa2off(kd, va, pa, PAGE_SIZE));
	pd = vm->l1pt[L1_IDX(va)];
	if (!l1pte_valid(pd))
		goto invalid;
	if (l1pte_section_p(pd)) {
		/* 1MB section mapping. */
		*pa = ((u_long)pd & L1_S_ADDR_MASK) + (va & L1_S_OFFSET);
		return  (_kvm_pa2off(kd, *pa, pa, L1_S_SIZE));
	}
	pte_pa = (pd & L1_ADDR_MASK) + l2pte_index(va) * sizeof(pte);
	_kvm_pa2off(kd, pte_pa, &pte_off, L1_S_SIZE);
	if (lseek(kd->pmfd, pte_off, SEEK_SET) == -1) {
		_kvm_syserr(kd, kd->program, "_kvm_kvatop: lseek");
		goto invalid;
	}
	if (read(kd->pmfd, &pte, sizeof(pte)) != sizeof (pte)) {
		_kvm_syserr(kd, kd->program, "_kvm_kvatop: read");
		goto invalid;
	}
	if (!l2pte_valid(pte)) {
		goto invalid;
	}
	if ((pte & L2_TYPE_MASK) == L2_TYPE_L) {
		*pa = (pte & L2_L_FRAME) | (va & L2_L_OFFSET);
		return (_kvm_pa2off(kd, *pa, pa, L2_L_SIZE));
	}
	*pa = (pte & L2_S_FRAME) | (va & L2_S_OFFSET);
	return (_kvm_pa2off(kd, *pa, pa, PAGE_SIZE));
invalid:
	_kvm_err(kd, 0, "Invalid address (%lx)", va);
	return 0;
}
Example No. 3
static int
_sparc64_read_phys(kvm_t *kd, off_t pos, void *buf, size_t size)
{

	/* XXX This has to be a raw file read, kvm_read is virtual. */
	if (pread(kd->pmfd, buf, size, pos) != (ssize_t)size) {
		_kvm_syserr(kd, kd->program, "_sparc64_read_phys: pread");
		return (0);
	}
	return (1);
}
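Examples No. 1 and No. 3 do the same raw physical read, once with lseek() plus read() and once with a single pread(); pread() leaves the shared file offset alone, which matters when several readers share kd->pmfd. A minimal standalone sketch of the same idea, written to also tolerate short reads and EINTR; the helper name pread_fully and its exact contract are illustrative assumptions, not libkvm code:

#include <errno.h>
#include <unistd.h>

/*
 * Read exactly `size` bytes at offset `pos`, retrying on short reads
 * and EINTR.  Returns 0 on success, -1 on error or premature EOF.
 */
static int
pread_fully(int fd, void *buf, size_t size, off_t pos)
{
	char *p = buf;

	while (size > 0) {
		ssize_t n = pread(fd, p, size, pos);

		if (n == -1) {
			if (errno == EINTR)
				continue;
			return (-1);		/* real I/O error */
		}
		if (n == 0)
			return (-1);		/* unexpected EOF */
		p += n;
		pos += n;
		size -= (size_t)n;
	}
	return (0);
}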
Example No. 4
/*
 * Read from user space.  The user context is given by pid.
 */
ssize_t
kvm_uread(kvm_t *kd, pid_t pid, u_long uva, char *buf, size_t len)
{
	char *cp;
	char procfile[MAXPATHLEN];
	ssize_t amount;
	int fd;

	if (!kvm_ishost(kd)) { /* XXX: vkernels */
		_kvm_err(kd, kd->program,
		    "cannot read user space from dead kernel");
		return (0);
	}

	snprintf(procfile, sizeof(procfile), "/proc/%d/mem", pid);
	fd = open(procfile, O_RDONLY, 0);
	if (fd < 0) {
		_kvm_err(kd, kd->program, "cannot open %s", procfile);
		return (0);
	}

	cp = buf;
	while (len > 0) {
		errno = 0;
		if (lseek(fd, (off_t)uva, SEEK_SET) == -1 && errno != 0) {
			_kvm_err(kd, kd->program, "invalid address (%lx) in %s",
			    uva, procfile);
			break;
		}
		amount = read(fd, cp, len);
		if (amount < 0) {
			_kvm_syserr(kd, kd->program, "error reading %s",
			    procfile);
			break;
		}
		if (amount == 0) {
			_kvm_err(kd, kd->program, "EOF reading %s", procfile);
			break;
		}
		cp += amount;
		uva += amount;
		len -= amount;
	}

	close(fd);
	return ((ssize_t)(cp - buf));
}
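kvm_uread() above pulls bytes out of a live process through /proc/<pid>/mem, seeking to the user virtual address and reading in a loop. A hedged usage sketch, assuming a libkvm that actually exports kvm_uread() with the signature shown above (DragonFly and some older BSDs); the helper name and the 16-byte probe are illustrative:

#include <sys/types.h>

#include <kvm.h>
#include <stdio.h>

/* Copy a few bytes of a live process's address space; kd is already open. */
static void
dump_user_bytes(kvm_t *kd, pid_t pid, u_long uva)
{
	char data[16];

	if (kvm_uread(kd, pid, uva, data, sizeof(data)) ==
	    (ssize_t)sizeof(data))
		printf("read %zu bytes at %#lx from pid %d\n",
		    sizeof(data), uva, (int)pid);
}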
Example No. 5
static int
_arm_kvatop(kvm_t *kd, kvaddr_t va, off_t *pa)
{
	struct vmstate *vm = kd->vmst;
	arm_pd_entry_t pd;
	arm_pt_entry_t pte;
	arm_physaddr_t pte_pa;
	off_t pte_off;

	if (vm->l1pt == NULL)
		return (_kvm_pa2off(kd, va, pa, ARM_PAGE_SIZE));
	pd = _kvm32toh(kd, vm->l1pt[ARM_L1_IDX(va)]);
	if (!l1pte_valid(pd))
		goto invalid;
	if (l1pte_section_p(pd)) {
		/* 1MB section mapping. */
		*pa = (pd & ARM_L1_S_ADDR_MASK) + (va & ARM_L1_S_OFFSET);
		return  (_kvm_pa2off(kd, *pa, pa, ARM_L1_S_SIZE));
	}
	pte_pa = (pd & ARM_L1_C_ADDR_MASK) + l2pte_index(va) * sizeof(pte);
	_kvm_pa2off(kd, pte_pa, &pte_off, ARM_L1_S_SIZE);
	if (pread(kd->pmfd, &pte, sizeof(pte), pte_off) != sizeof(pte)) {
		_kvm_syserr(kd, kd->program, "_arm_kvatop: pread");
		goto invalid;
	}
	pte = _kvm32toh(kd, pte);
	if (!l2pte_valid(pte)) {
		goto invalid;
	}
	if ((pte & ARM_L2_TYPE_MASK) == ARM_L2_TYPE_L) {
		*pa = (pte & ARM_L2_L_FRAME) | (va & ARM_L2_L_OFFSET);
		return (_kvm_pa2off(kd, *pa, pa, ARM_L2_L_SIZE));
	}
	*pa = (pte & ARM_L2_S_FRAME) | (va & ARM_L2_S_OFFSET);
	return (_kvm_pa2off(kd, *pa, pa, ARM_PAGE_SIZE));
invalid:
	_kvm_err(kd, 0, "Invalid address (%jx)", (uintmax_t)va);
	return 0;
}
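The two successful exits above correspond to the two ARM short-descriptor mapping sizes: a 1 MB L1 "section", whose physical base comes straight out of the L1 entry, and a small L2 page. A sketch of the underlying arithmetic with the masks written out as literals; the real values live in the ARM headers (ARM_L1_S_ADDR_MASK and friends), so treat these constants as illustrative assumptions:

#include <stdint.h>

/* ARM short-descriptor translation, with the usual bit layout spelled out. */
#define SECTION_ADDR_MASK	0xfff00000u	/* L1 section base: top 12 bits */
#define SECTION_OFFSET		0x000fffffu	/* low 20 bits of the VA */
#define SMALLPAGE_FRAME		0xfffff000u	/* L2 small page frame: top 20 bits */
#define SMALLPAGE_OFFSET	0x00000fffu	/* low 12 bits of the VA */

static uint32_t
section_pa(uint32_t l1_entry, uint32_t va)
{
	/* 1 MB section: physical base plus 20-bit page offset. */
	return ((l1_entry & SECTION_ADDR_MASK) + (va & SECTION_OFFSET));
}

static uint32_t
smallpage_pa(uint32_t l2_entry, uint32_t va)
{
	/* 4 KB small page: page frame plus 12-bit offset. */
	return ((l2_entry & SMALLPAGE_FRAME) | (va & SMALLPAGE_OFFSET));
}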
Example No. 6
File: kvm.c Project: coyizumi/cs111
static kvm_t *
_kvm_open(kvm_t *kd, const char *uf, const char *mf, int flag, char *errout)
{
	struct stat st;

	kd->vmfd = -1;
	kd->pmfd = -1;
	kd->nlfd = -1;
	kd->vmst = 0;
	kd->procbase = 0;
	kd->argspc = 0;
	kd->argv = 0;

	if (uf == 0)
		uf = getbootfile();
	else if (strlen(uf) >= MAXPATHLEN) {
		_kvm_err(kd, kd->program, "exec file name too long");
		goto failed;
	}
	if (flag & ~O_RDWR) {
		_kvm_err(kd, kd->program, "bad flags arg");
		goto failed;
	}
	if (mf == 0)
		mf = _PATH_MEM;

	if ((kd->pmfd = open(mf, flag | O_CLOEXEC, 0)) < 0) {
		_kvm_syserr(kd, kd->program, "%s", mf);
		goto failed;
	}
	if (fstat(kd->pmfd, &st) < 0) {
		_kvm_syserr(kd, kd->program, "%s", mf);
		goto failed;
	}
	if (S_ISREG(st.st_mode) && st.st_size <= 0) {
		errno = EINVAL;
		_kvm_syserr(kd, kd->program, "empty file");
		goto failed;
	}
	if (S_ISCHR(st.st_mode)) {
		/*
		 * If this is a character special device, then check that
		 * it's /dev/mem.  If so, open kmem too.  (Maybe we should
		 * make it work for either /dev/mem or /dev/kmem -- in either
		 * case you're working with a live kernel.)
		 */
		if (strcmp(mf, _PATH_DEVNULL) == 0) {
			kd->vmfd = open(_PATH_DEVNULL, O_RDONLY | O_CLOEXEC);
			return (kd);
		} else if (strcmp(mf, _PATH_MEM) == 0) {
			if ((kd->vmfd = open(_PATH_KMEM, flag | O_CLOEXEC)) <
			    0) {
				_kvm_syserr(kd, kd->program, "%s", _PATH_KMEM);
				goto failed;
			}
			return (kd);
		}
	}
	/*
	 * This is a crash dump.
	 * Initialize the virtual address translation machinery,
	 * but first setup the namelist fd.
	 */
	if ((kd->nlfd = open(uf, O_RDONLY | O_CLOEXEC, 0)) < 0) {
		_kvm_syserr(kd, kd->program, "%s", uf);
		goto failed;
	}
	if (strncmp(mf, _PATH_FWMEM, strlen(_PATH_FWMEM)) == 0)
		kd->rawdump = 1;
	if (_kvm_initvtop(kd) < 0)
		goto failed;
	return (kd);
failed:
	/*
	 * Copy out the error if doing sane error semantics.
	 */
	if (errout != 0)
		strlcpy(errout, kd->errbuf, _POSIX2_LINE_MAX);
	(void)kvm_close(kd);
	return (0);
}
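_kvm_open() is the private back end behind the public constructors; the mf argument decides between the live-kernel path (/dev/mem plus /dev/kmem) and the crash-dump path (namelist file plus _kvm_initvtop()). A minimal sketch of reaching it through the public kvm_openfiles() interface, once for the running system and once for a saved dump; the dump file names are illustrative assumptions:

#include <fcntl.h>
#include <kvm.h>
#include <limits.h>
#include <stdio.h>

int
main(void)
{
	char errbuf[_POSIX2_LINE_MAX];
	kvm_t *kd;

	/* NULL exec/core/swap files mean "the running kernel and /dev/mem". */
	kd = kvm_openfiles(NULL, NULL, NULL, O_RDONLY, errbuf);

	/* For a crash dump, name the saved kernel and its vmcore instead:
	 * kd = kvm_openfiles("/var/crash/kernel.0", "/var/crash/vmcore.0",
	 *     NULL, O_RDONLY, errbuf);
	 */
	if (kd == NULL) {
		fprintf(stderr, "kvm_openfiles: %s\n", errbuf);
		return (1);
	}
	/* ... kvm_nlist(), kvm_read(), kvm_getprocs(), ... */
	kvm_close(kd);
	return (0);
}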
Example No. 7
char *
kvm_getfiles(kvm_t *kd, int op, int arg, int *cnt)
{
	int mib[2], st, n, nfiles, nprocs;
	size_t size;

	_kvm_syserr(kd, kd->program, "kvm_getfiles has been broken for years");
	return (0);
	if (ISALIVE(kd)) {
		size = 0;
		mib[0] = CTL_KERN;
		mib[1] = KERN_FILE;
		st = sysctl(mib, 2, NULL, &size, NULL, 0);
		if (st == -1) {
			_kvm_syserr(kd, kd->program, "kvm_getfiles");
			return (0);
		}
		if (kd->argspc == 0)
			kd->argspc = (char *)_kvm_malloc(kd, size);
		else if (kd->arglen < (int)size)
			kd->argspc = (char *)_kvm_realloc(kd, kd->argspc, size);
		if (kd->argspc == 0)
			return (0);
		kd->arglen = size;
		st = sysctl(mib, 2, kd->argspc, &size, NULL, 0);
		if (st != 0) {
			_kvm_syserr(kd, kd->program, "kvm_getfiles");
			return (0);
		}
		nfiles = size / sizeof(struct xfile);
	} else {
		struct nlist nl[4], *p;

		nl[0].n_name = "_allproc";
		nl[1].n_name = "_nprocs";
		nl[2].n_name = "_nfiles";
		nl[3].n_name = 0;

		if (kvm_nlist(kd, nl) != 0) {
			for (p = nl; p->n_type != 0; ++p)
				;
			_kvm_err(kd, kd->program,
				 "%s: no such symbol", p->n_name);
			return (0);
		}
		if (KREAD(kd, nl[1].n_value, &nprocs)) {
			_kvm_err(kd, kd->program, "can't read nprocs");
			return (0);
		}
		if (KREAD(kd, nl[2].n_value, &nfiles)) {
			_kvm_err(kd, kd->program, "can't read nfiles");
			return (0);
		}
		size = sizeof(void *) + (nfiles + 10) * sizeof(struct file);
		if (kd->argspc == 0)
			kd->argspc = (char *)_kvm_malloc(kd, size);
		else if (kd->arglen < (int)size)
			kd->argspc = (char *)_kvm_realloc(kd, kd->argspc, size);
		if (kd->argspc == 0)
			return (0);
		kd->arglen = size;
		n = kvm_deadfiles(kd, op, arg, nl[0].n_value, nprocs);
		if (n != nfiles) {
			_kvm_err(kd, kd->program, "inconsistent nfiles");
			return (0);
		}
		nfiles = n;
	}
	*cnt = nfiles;
	return (kd->argspc);
}
Example No. 8
struct kinfo_proc *
kvm_getprocs(kvm_t *kd, int op, int arg, int *cnt)
{
	int mib[4], st, nprocs;
	int miblen = ((op & ~KERN_PROC_FLAGMASK) == KERN_PROC_ALL) ? 3 : 4;
	size_t size;

	if (kd->procbase != 0) {
		free((void *)kd->procbase);
		/*
		 * Clear this pointer in case this call fails.  Otherwise,
		 * kvm_close() will free it again.
		 */
		kd->procbase = 0;
	}
	if (kvm_ishost(kd)) {
		size = 0;
		mib[0] = CTL_KERN;
		mib[1] = KERN_PROC;
		mib[2] = op;
		mib[3] = arg;
		st = sysctl(mib, miblen, NULL, &size, NULL, 0);
		if (st == -1) {
			_kvm_syserr(kd, kd->program, "kvm_getprocs");
			return (0);
		}
		do {
			size += size / 10;
			kd->procbase = (struct kinfo_proc *)
			    _kvm_realloc(kd, kd->procbase, size);
			if (kd->procbase == 0)
				return (0);
			st = sysctl(mib, miblen, kd->procbase, &size, NULL, 0);
		} while (st == -1 && errno == ENOMEM);
		if (st == -1) {
			_kvm_syserr(kd, kd->program, "kvm_getprocs");
			return (0);
		}
		if (size % sizeof(struct kinfo_proc) != 0) {
			_kvm_err(kd, kd->program,
				"proc size mismatch (%zu total, %zu chunks)",
				size, sizeof(struct kinfo_proc));
			return (0);
		}
		nprocs = size / sizeof(struct kinfo_proc);
	} else {
		struct nlist nl[4], *p;

		nl[0].n_name = "_nprocs";
		nl[1].n_name = "_allproc";
		nl[2].n_name = "_zombproc";
		nl[3].n_name = 0;

		if (kvm_nlist(kd, nl) != 0) {
			for (p = nl; p->n_type != 0; ++p)
				;
			_kvm_err(kd, kd->program,
				 "%s: no such symbol", p->n_name);
			return (0);
		}
		if (KREAD(kd, nl[0].n_value, &nprocs)) {
			_kvm_err(kd, kd->program, "can't read nprocs");
			return (0);
		}
		nprocs = kvm_deadprocs(kd, op, arg, nl[1].n_value,
				      nl[2].n_value);
#ifdef notdef
		size = nprocs * sizeof(struct kinfo_proc);
		(void)realloc(kd->procbase, size);
#endif
	}
	*cnt = nprocs;
	return (kd->procbase);
}
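The live branch above probes the needed buffer size with a NULL oldp, then grows the allocation by ten percent per attempt and retries while sysctl() fails with ENOMEM, since the process table can grow between the two calls. The same pattern, sketched independently of libkvm; the function name fetch_procs and the ownership convention are illustrative assumptions:

#include <sys/types.h>
#include <sys/sysctl.h>

#include <errno.h>
#include <stdlib.h>

/*
 * Fetch CTL_KERN.KERN_PROC.<op>.<arg>, growing the buffer and retrying
 * while the kernel reports ENOMEM.  Returns malloc()ed data (caller
 * frees) with the byte count in *lenp, or NULL on failure.
 */
static void *
fetch_procs(int op, int arg, size_t *lenp)
{
	int mib[4] = { CTL_KERN, KERN_PROC, op, arg };
	size_t size = 0;
	void *buf = NULL;

	if (sysctl(mib, 4, NULL, &size, NULL, 0) == -1 || size == 0)
		return (NULL);		/* error, or nothing to report */
	for (;;) {
		void *nbuf;

		size += size / 10;	/* headroom for new processes */
		if ((nbuf = realloc(buf, size)) == NULL) {
			free(buf);
			return (NULL);
		}
		buf = nbuf;
		if (sysctl(mib, 4, buf, &size, NULL, 0) == 0)
			break;		/* size now holds the bytes written */
		if (errno != ENOMEM) {
			free(buf);
			return (NULL);
		}
	}
	*lenp = size;
	return (buf);
}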
Example No. 9
struct kinfo_proc *
kvm_getprocs(kvm_t *kd, int op, int arg, int *cnt)
{
	size_t size;
	int mib[4], st, nprocs;

	if (ISALIVE(kd)) {
		size = 0;
		mib[0] = CTL_KERN;
		mib[1] = KERN_PROC;
		mib[2] = op;
		mib[3] = arg;
		st = sysctl(mib, 4, NULL, &size, NULL, (size_t)0);
		if (st == -1) {
			_kvm_syserr(kd, kd->program, "kvm_getprocs");
			return (NULL);
		}
		KVM_ALLOC(kd, procbase, size);
		st = sysctl(mib, 4, kd->procbase, &size, NULL, (size_t)0);
		if (st == -1) {
			_kvm_syserr(kd, kd->program, "kvm_getprocs");
			return (NULL);
		}
		if (size % sizeof(struct kinfo_proc) != 0) {
			_kvm_err(kd, kd->program,
			    "proc size mismatch (%lu total, %lu chunks)",
			    (u_long)size, (u_long)sizeof(struct kinfo_proc));
			return (NULL);
		}
		nprocs = (int) (size / sizeof(struct kinfo_proc));
	} else {
		struct nlist nl[4], *p;

		(void)memset(nl, 0, sizeof(nl));
		nl[0].n_name = "_nprocs";
		nl[1].n_name = "_allproc";
		nl[2].n_name = "_zombproc";
		nl[3].n_name = NULL;

		if (kvm_nlist(kd, nl) != 0) {
			for (p = nl; p->n_type != 0; ++p)
				continue;
			_kvm_err(kd, kd->program,
			    "%s: no such symbol", p->n_name);
			return (NULL);
		}
		if (KREAD(kd, nl[0].n_value, &nprocs)) {
			_kvm_err(kd, kd->program, "can't read nprocs");
			return (NULL);
		}
		size = nprocs * sizeof(*kd->procbase);
		KVM_ALLOC(kd, procbase, size);
		nprocs = kvm_deadprocs(kd, op, arg, nl[1].n_value,
		    nl[2].n_value, nprocs);
		if (nprocs < 0)
			return (NULL);
#ifdef notdef
		size = nprocs * sizeof(struct kinfo_proc);
		(void)realloc(kd->procbase, size);
#endif
	}
	*cnt = nprocs;
	return (kd->procbase);
}
Example No. 10
/*
 * Used to translate a virtual address to a physical address for systems
 * running under PAE mode. Three levels of virtual memory pages are handled
 * here: the per-CPU L3 page, the 4 L2 PDs and the PTs.
 */
int
_kvm_kvatop_i386pae(kvm_t *kd, vaddr_t va, paddr_t *pa)
{
	cpu_kcore_hdr_t *cpu_kh;
	u_long page_off;
	pd_entry_t pde;
	pt_entry_t pte;
	paddr_t pde_pa, pte_pa;

	cpu_kh = kd->cpu_data;
	page_off = va & PGOFSET;
	
	/*
	 * Find and read the PDE. Ignore the L3, as it is only a per-CPU
	 * page, not needed for kernel VA => PA translations.
	 * Remember that the 4 L2 pages are contiguous, so it is safe
	 * to increment pdppaddr to compute the address of the PDE.
	 * pdppaddr being PAGE_SIZE aligned, we mask the option bits.
	 */
	pde_pa = (cpu_kh->pdppaddr & PG_FRAME) + (pl2_pi(va) * sizeof(pde));
	if (_kvm_pread(kd, kd->pmfd, (void *)&pde, sizeof(pde),
	    _kvm_pa2off(kd, pde_pa)) != sizeof(pde)) {
		_kvm_syserr(kd, 0, "could not read PDE");
		goto lose;
	}

	/*
	 * Find and read the page table entry.
	 */
	if ((pde & PG_V) == 0) {
		_kvm_err(kd, 0, "invalid translation (invalid PDE)");
		goto lose;
	}
	if ((pde & PG_PS) != 0) {
		/*
		 * This is a 2MB page.
		 */
		page_off = va & ((vaddr_t)~PG_LGFRAME);
		*pa = (pde & PG_LGFRAME) + page_off;
		return (int)(NBPD_L2 - page_off);
	}

	pte_pa = (pde & PG_FRAME) + (pl1_pi(va) * sizeof(pt_entry_t));
	if (_kvm_pread(kd, kd->pmfd, (void *) &pte, sizeof(pte),
	    _kvm_pa2off(kd, pte_pa)) != sizeof(pte)) {
		_kvm_syserr(kd, 0, "could not read PTE");
		goto lose;
	}

	/*
	 * Validate the PTE and return the physical address.
	 */
	if ((pte & PG_V) == 0) {
		_kvm_err(kd, 0, "invalid translation (invalid PTE)");
		goto lose;
	}
	*pa = (pte & PG_FRAME) + page_off;
	return (int)(NBPG - page_off);

lose:
	*pa = (paddr_t)~0L;
	return 0;

}
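The comment above relies on the four PAE page directories being physically contiguous, so a single index derived from the virtual address selects the PDE across all of them. A sketch of the index arithmetic for 32-bit PAE (2-bit PDPT index, 9-bit directory index, 9-bit table index, 12-bit page offset); the real pl2_pi()/pl1_pi() macros come from the x86 pmap headers, so take these as illustrative assumptions:

#include <stdint.h>

/*
 * PAE virtual address layout:  [ 2 | 9 | 9 | 12 ]
 *                               PDPT PD  PT  page offset
 * Treating the four contiguous PDs as one array merges the top two
 * fields into an 11-bit directory index.
 */
#define PAE_PD_INDEX(va)	(((uint32_t)(va) >> 21) & 0x7ff)
#define PAE_PT_INDEX(va)	(((uint32_t)(va) >> 12) & 0x1ff)
#define PAE_PAGE_OFFSET(va)	((uint32_t)(va) & 0xfff)

/* Byte offset of the PDE within the four concatenated directories. */
static uint32_t
pae_pde_offset(uint32_t va)
{
	return (PAE_PD_INDEX(va) * 8);	/* PAE entries are 64-bit */
}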
Example No. 11
struct kinfo_proc *
kvm_getprocs(kvm_t *kd, int op, int arg, int *cnt)
{
	int mib[4], st, nprocs;
	size_t size;

	if (kd->procbase != 0) {
		free((void *)kd->procbase);
		/*
		 * Clear this pointer in case this call fails.  Otherwise,
		 * kvm_close() will free it again.
		 */
		kd->procbase = 0;
	}
	if (ISALIVE(kd)) {
		size = 0;
		mib[0] = CTL_KERN;
		mib[1] = KERN_PROC;
		mib[2] = op;
		mib[3] = arg;
		st = sysctl(mib, 4, NULL, &size, NULL, 0);
		if (st == -1) {
			_kvm_syserr(kd, kd->program, "kvm_getprocs");
			return (0);
		}
		kd->procbase = _kvm_malloc(kd, size);
		if (kd->procbase == 0)
			return (0);
		st = sysctl(mib, 4, kd->procbase, &size, NULL, 0);
		if (st == -1) {
			_kvm_syserr(kd, kd->program, "kvm_getprocs");
			return (0);
		}
		if (size % sizeof(struct kinfo_proc) != 0) {
			_kvm_err(kd, kd->program,
			    "proc size mismatch (%zu total, %zu chunks)",
			    size, sizeof(struct kinfo_proc));
			return (0);
		}
		nprocs = size / sizeof(struct kinfo_proc);
	} else {
		struct nlist nl[4], *p;

		memset(nl, 0, sizeof(nl));
		nl[0].n_name = "_nprocs";
		nl[1].n_name = "_allproc";
		nl[2].n_name = "_zombproc";
		nl[3].n_name = NULL;

		if (kvm_nlist(kd, nl) != 0) {
			for (p = nl; p->n_type != 0; ++p)
				;
			_kvm_err(kd, kd->program,
			    "%s: no such symbol", p->n_name);
			return (0);
		}
		if (KREAD(kd, nl[0].n_value, &nprocs)) {
			_kvm_err(kd, kd->program, "can't read nprocs");
			return (0);
		}
		size = nprocs * sizeof(struct kinfo_proc);
		kd->procbase = _kvm_malloc(kd, size);
		if (kd->procbase == 0)
			return (0);

		nprocs = kvm_deadprocs(kd, op, arg, nl[1].n_value,
		    nl[2].n_value, nprocs);
#ifdef notdef
		size = nprocs * sizeof(struct kinfo_proc);
		(void)realloc(kd->procbase, size);
#endif
	}
	*cnt = nprocs;
	return (kd->procbase);
}
Example No. 12
struct kinfo_proc2 *
kvm_getproc2(kvm_t *kd, int op, int arg, size_t esize, int *cnt)
{
	int mib[6], st, nprocs;
	struct user user;
	size_t size;

	if ((ssize_t)esize < 0)
		return (NULL);

	if (kd->procbase2 != NULL) {
		free(kd->procbase2);
		/*
		 * Clear this pointer in case this call fails.  Otherwise,
		 * kvm_close() will free it again.
		 */
		kd->procbase2 = 0;
	}

	if (ISALIVE(kd)) {
		size = 0;
		mib[0] = CTL_KERN;
		mib[1] = KERN_PROC2;
		mib[2] = op;
		mib[3] = arg;
		mib[4] = esize;
		mib[5] = 0;
		st = sysctl(mib, 6, NULL, &size, NULL, 0);
		if (st == -1) {
			_kvm_syserr(kd, kd->program, "kvm_getproc2");
			return (NULL);
		}

		mib[5] = size / esize;
		kd->procbase2 = _kvm_malloc(kd, size);
		if (kd->procbase2 == 0)
			return (NULL);
		st = sysctl(mib, 6, kd->procbase2, &size, NULL, 0);
		if (st == -1) {
			_kvm_syserr(kd, kd->program, "kvm_getproc2");
			return (NULL);
		}
		nprocs = size / esize;
	} else {
		struct kinfo_proc2 kp2, *kp2p;
		struct kinfo_proc *kp;
		char *kp2c;
		int i;

		kp = kvm_getprocs(kd, op, arg, &nprocs);
		if (kp == NULL)
			return (NULL);

		kd->procbase2 = _kvm_malloc(kd, nprocs * esize);
		if (kd->procbase2 == NULL)
			return (NULL);
		kp2c = (char *)kd->procbase2;
		kp2p = &kp2;
		for (i = 0; i < nprocs; i++, kp++) {
			memset(kp2p, 0, sizeof(kp2));
			kp2p->p_paddr = PTRTOINT64(kp->kp_eproc.e_paddr);

			kp2p->p_addr = PTRTOINT64(kp->kp_proc.p_addr);
			kp2p->p_fd = PTRTOINT64(kp->kp_proc.p_fd);
			kp2p->p_stats = PTRTOINT64(kp->kp_proc.p_stats);
			kp2p->p_limit = PTRTOINT64(kp->kp_eproc.e_limit);
			kp2p->p_vmspace = PTRTOINT64(kp->kp_proc.p_vmspace);
			kp2p->p_sigacts = PTRTOINT64(kp->kp_proc.p_sigacts);
			kp2p->p_sess = PTRTOINT64(kp->kp_eproc.e_sess);
			kp2p->p_tsess = 0;
			kp2p->p_ru = PTRTOINT64(kp->kp_proc.p_ru);

			kp2p->p_eflag = 0;
			kp2p->p_exitsig = kp->kp_proc.p_exitsig;
			kp2p->p_flag = kp->kp_proc.p_flag;

			kp2p->p_pid = kp->kp_proc.p_pid;

			kp2p->p_ppid = kp->kp_eproc.e_ppid;
#if 0
			kp2p->p_sid = kp->kp_eproc.e_sid;
#else
			kp2p->p_sid = -1; /* XXX */
#endif
			kp2p->p__pgid = kp->kp_eproc.e_pgid;

			kp2p->p_tpgid = -1;

			kp2p->p_uid = kp->kp_eproc.e_ucred.cr_uid;
			kp2p->p_ruid = kp->kp_eproc.e_pcred.p_ruid;
			kp2p->p_gid = kp->kp_eproc.e_ucred.cr_gid;
			kp2p->p_rgid = kp->kp_eproc.e_pcred.p_rgid;

			memcpy(kp2p->p_groups, kp->kp_eproc.e_ucred.cr_groups,
			    MIN(sizeof(kp2p->p_groups),
			    sizeof(kp->kp_eproc.e_ucred.cr_groups)));
			kp2p->p_ngroups = kp->kp_eproc.e_ucred.cr_ngroups;

			kp2p->p_jobc = kp->kp_eproc.e_jobc;
			kp2p->p_tdev = kp->kp_eproc.e_tdev;
			kp2p->p_tpgid = kp->kp_eproc.e_tpgid;
			kp2p->p_tsess = PTRTOINT64(kp->kp_eproc.e_tsess);

			kp2p->p_estcpu = kp->kp_proc.p_estcpu;
			kp2p->p_rtime_sec = kp->kp_proc.p_estcpu;
			kp2p->p_rtime_usec = kp->kp_proc.p_estcpu;
			kp2p->p_cpticks = kp->kp_proc.p_cpticks;
			kp2p->p_pctcpu = kp->kp_proc.p_pctcpu;
			kp2p->p_swtime = kp->kp_proc.p_swtime;
			kp2p->p_slptime = kp->kp_proc.p_slptime;
			kp2p->p_schedflags = 0;

			kp2p->p_uticks = kp->kp_proc.p_uticks;
			kp2p->p_sticks = kp->kp_proc.p_sticks;
			kp2p->p_iticks = kp->kp_proc.p_iticks;

			kp2p->p_tracep = PTRTOINT64(kp->kp_proc.p_tracep);
			kp2p->p_traceflag = kp->kp_proc.p_traceflag;

			kp2p->p_holdcnt = 1;

			kp2p->p_siglist = kp->kp_proc.p_siglist;
			kp2p->p_sigmask = kp->kp_proc.p_sigmask;
			kp2p->p_sigignore = kp->kp_proc.p_sigignore;
			kp2p->p_sigcatch = kp->kp_proc.p_sigcatch;

			kp2p->p_stat = kp->kp_proc.p_stat;
			kp2p->p_priority = kp->kp_proc.p_priority;
			kp2p->p_usrpri = kp->kp_proc.p_usrpri;
			kp2p->p_nice = kp->kp_proc.p_nice;

			kp2p->p_xstat = kp->kp_proc.p_xstat;
			kp2p->p_acflag = kp->kp_proc.p_acflag;

			strncpy(kp2p->p_comm, kp->kp_proc.p_comm,
			    MIN(sizeof(kp2p->p_comm), sizeof(kp->kp_proc.p_comm)));

			strncpy(kp2p->p_wmesg, kp->kp_eproc.e_wmesg,
			    sizeof(kp2p->p_wmesg));
			kp2p->p_wchan = PTRTOINT64(kp->kp_proc.p_wchan);

			strncpy(kp2p->p_login, kp->kp_eproc.e_login,
			    sizeof(kp2p->p_login));

			kp2p->p_vm_rssize = kp->kp_eproc.e_xrssize;
			kp2p->p_vm_tsize = kp->kp_eproc.e_vm.vm_tsize;
			kp2p->p_vm_dsize = kp->kp_eproc.e_vm.vm_dsize;
			kp2p->p_vm_ssize = kp->kp_eproc.e_vm.vm_ssize;

			kp2p->p_eflag = kp->kp_eproc.e_flag;

			if (P_ZOMBIE(&kp->kp_proc) || kp->kp_proc.p_addr == NULL ||
			    KREAD(kd, (u_long)kp->kp_proc.p_addr, &user)) {
				kp2p->p_uvalid = 0;
			} else {
				kp2p->p_uvalid = 1;

				kp2p->p_ustart_sec = user.u_stats.p_start.tv_sec;
				kp2p->p_ustart_usec = user.u_stats.p_start.tv_usec;

				kp2p->p_uutime_sec = user.u_stats.p_ru.ru_utime.tv_sec;
				kp2p->p_uutime_usec = user.u_stats.p_ru.ru_utime.tv_usec;
				kp2p->p_ustime_sec = user.u_stats.p_ru.ru_stime.tv_sec;
				kp2p->p_ustime_usec = user.u_stats.p_ru.ru_stime.tv_usec;

				kp2p->p_uru_maxrss = user.u_stats.p_ru.ru_maxrss;
				kp2p->p_uru_ixrss = user.u_stats.p_ru.ru_ixrss;
				kp2p->p_uru_idrss = user.u_stats.p_ru.ru_idrss;
				kp2p->p_uru_isrss = user.u_stats.p_ru.ru_isrss;
				kp2p->p_uru_minflt = user.u_stats.p_ru.ru_minflt;
				kp2p->p_uru_majflt = user.u_stats.p_ru.ru_majflt;
				kp2p->p_uru_nswap = user.u_stats.p_ru.ru_nswap;
				kp2p->p_uru_inblock = user.u_stats.p_ru.ru_inblock;
				kp2p->p_uru_oublock = user.u_stats.p_ru.ru_oublock;
				kp2p->p_uru_msgsnd = user.u_stats.p_ru.ru_msgsnd;
				kp2p->p_uru_msgrcv = user.u_stats.p_ru.ru_msgrcv;
				kp2p->p_uru_nsignals = user.u_stats.p_ru.ru_nsignals;
				kp2p->p_uru_nvcsw = user.u_stats.p_ru.ru_nvcsw;
				kp2p->p_uru_nivcsw = user.u_stats.p_ru.ru_nivcsw;

				kp2p->p_uctime_sec =
				    user.u_stats.p_cru.ru_utime.tv_sec +
				    user.u_stats.p_cru.ru_stime.tv_sec;
				kp2p->p_uctime_usec =
				    user.u_stats.p_cru.ru_utime.tv_usec +
				    user.u_stats.p_cru.ru_stime.tv_usec;
			}

			memcpy(kp2c, &kp2, esize);
			kp2c += esize;
		}

		free(kd->procbase);
		kd->procbase = NULL;
	}
	*cnt = nprocs;
	return (kd->procbase2);
}
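A hedged usage sketch for kvm_getproc2(): the caller passes the size of the kinfo_proc2 it was built against and gets back that many-byte records, which keeps old binaries working when the kernel's structure grows. This assumes a NetBSD/OpenBSD-era libkvm with the five-argument signature shown above; kd is an already-open handle (see the kvm_openfiles() sketch earlier):

#include <sys/param.h>
#include <sys/sysctl.h>

#include <kvm.h>
#include <stdio.h>

/* Print pid and command of every process via the kinfo_proc2 interface. */
static void
list_procs2(kvm_t *kd)
{
	struct kinfo_proc2 *kp2;
	int i, cnt;

	kp2 = kvm_getproc2(kd, KERN_PROC_ALL, 0,
	    sizeof(struct kinfo_proc2), &cnt);
	if (kp2 == NULL) {
		fprintf(stderr, "kvm_getproc2: %s\n", kvm_geterr(kd));
		return;
	}
	for (i = 0; i < cnt; i++)
		printf("%5d %s\n", (int)kp2[i].p_pid, kp2[i].p_comm);
}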
Example No. 13
char *
kvm_getfiles(kvm_t *kd, int op, int arg, int *cnt)
{
	int mib[2], st, nfiles;
	size_t size;
	struct file *fp, *fplim;
	struct filelist filehead;

	if (kvm_ishost(kd)) {
		size = 0;
		mib[0] = CTL_KERN;
		mib[1] = KERN_FILE;
		st = sysctl(mib, 2, NULL, &size, NULL, 0);
		if (st == -1) {
			_kvm_syserr(kd, kd->program, "kvm_getfiles");
			return (0);
		}
		if (kd->argspc == 0)
			kd->argspc = (char *)_kvm_malloc(kd, size);
		else if (kd->arglen < size)
			kd->argspc = (char *)_kvm_realloc(kd, kd->argspc, size);
		if (kd->argspc == 0)
			return (0);
		kd->arglen = size;
		st = sysctl(mib, 2, kd->argspc, &size, NULL, 0);
		if (st == -1 || size < sizeof(filehead)) {
			_kvm_syserr(kd, kd->program, "kvm_getfiles");
			return (0);
		}
		filehead = *(struct filelist *)kd->argspc;
		fp = (struct file *)(kd->argspc + sizeof (filehead));
		fplim = (struct file *)(kd->argspc + size);
		for (nfiles = 0; filehead.lh_first && (fp < fplim); nfiles++, fp++)
			filehead.lh_first = fp->f_list.le_next;
	} else {
		struct nlist nl[3], *p;

		nl[0].n_name = "_filehead";
		nl[1].n_name = "_nfiles";
		nl[2].n_name = 0;

		if (kvm_nlist(kd, nl) != 0) {
			for (p = nl; p->n_type != 0; ++p)
				;
			_kvm_err(kd, kd->program,
				 "%s: no such symbol", p->n_name);
			return (0);
		}
		if (KREAD(kd, nl[0].n_value, &nfiles)) {
			_kvm_err(kd, kd->program, "can't read nfiles");
			return (0);
		}
		size = sizeof(filehead) + (nfiles + 10) * sizeof(struct file);
		if (kd->argspc == 0)
			kd->argspc = (char *)_kvm_malloc(kd, size);
		else if (kd->arglen < size)
			kd->argspc = (char *)_kvm_realloc(kd, kd->argspc, size);
		if (kd->argspc == 0)
			return (0);
		kd->arglen = size;
		nfiles = kvm_deadfiles(kd, op, arg, nl[1].n_value, nfiles);
		if (nfiles == 0)
			return (0);
	}
	*cnt = nfiles;
	return (kd->argspc);
}
Example No. 14
struct kinfo_proc *
kvm_getprocs(kvm_t *kd, int op, int arg, size_t esize, int *cnt)
{
	int mib[6], st, nthreads;
	size_t size;

	if ((ssize_t)esize < 0)
		return (NULL);

	if (kd->procbase != NULL) {
		free(kd->procbase);
		/*
		 * Clear this pointer in case this call fails.  Otherwise,
		 * kvm_close() will free it again.
		 */
		kd->procbase = 0;
	}

	if (ISALIVE(kd)) {
		size = 0;
		mib[0] = CTL_KERN;
		mib[1] = KERN_PROC;
		mib[2] = op;
		mib[3] = arg;
		mib[4] = esize;
		mib[5] = 0;
		st = sysctl(mib, 6, NULL, &size, NULL, 0);
		if (st == -1) {
			_kvm_syserr(kd, kd->program, "kvm_getprocs");
			return (NULL);
		}

		mib[5] = size / esize;
		kd->procbase = _kvm_malloc(kd, size);
		if (kd->procbase == 0)
			return (NULL);
		st = sysctl(mib, 6, kd->procbase, &size, NULL, 0);
		if (st == -1) {
			_kvm_syserr(kd, kd->program, "kvm_getprocs");
			return (NULL);
		}
		nthreads = size / esize;
	} else {
		struct nlist nl[4];
		int i, maxthread;
		struct proc *p;
		char *bp;

		if (esize > sizeof(struct kinfo_proc)) {
			_kvm_syserr(kd, kd->program,
			    "kvm_getprocs: unknown fields requested: libkvm out of date?");
			return (NULL);
		}

		memset(nl, 0, sizeof(nl));
		nl[0].n_name = "_nthreads";
		nl[1].n_name = "_allproc";
		nl[2].n_name = "_zombproc";
		nl[3].n_name = NULL;

		if (kvm_nlist(kd, nl) != 0) {
			for (i = 0; nl[i].n_type != 0; ++i)
				;
			_kvm_err(kd, kd->program,
			    "%s: no such symbol", nl[i].n_name);
			return (NULL);
		}
		if (KREAD(kd, nl[0].n_value, &maxthread)) {
			_kvm_err(kd, kd->program, "can't read nthreads");
			return (NULL);
		}

		kd->procbase = _kvm_malloc(kd, maxthread * esize);
		if (kd->procbase == 0)
			return (NULL);
		bp = (char *)kd->procbase;

		/* allproc */
		if (KREAD(kd, nl[1].n_value, &p)) {
			_kvm_err(kd, kd->program, "cannot read allproc");
			return (NULL);
		}
		nthreads = kvm_proclist(kd, op, arg, p, bp, maxthread, esize);
		if (nthreads < 0)
			return (NULL);

		/* zombproc */
		if (KREAD(kd, nl[2].n_value, &p)) {
			_kvm_err(kd, kd->program, "cannot read zombproc");
			return (NULL);
		}
		i = kvm_proclist(kd, op, arg, p, bp + (esize * nthreads),
		    maxthread - nthreads, esize);
		if (i > 0)
			nthreads += i;
	}
	if (kd->procbase != NULL)
		*cnt = nthreads;
	return (kd->procbase);
}
Example No. 15
int
_kvm_kvatop(kvm_t *kd, u_long va, paddr_t *pa)
{
	cpu_kcore_hdr_t *cpu_kh;
	struct vmstate *vm;
	int rv, page_off;
	alpha_pt_entry_t pte;
	off_t pteoff;

	if (!kd->vmst) {
		_kvm_err(kd, 0, "vatop called before initvtop");
		return (0);
	}

	if (ISALIVE(kd)) {
		_kvm_err(kd, 0, "vatop called in live kernel!");
		return (0);
	}

	cpu_kh = kd->cpu_data;
	vm = kd->vmst;
	page_off = va & (cpu_kh->page_size - 1);

#ifndef PAGE_SHIFT
#define	PAGE_SHIFT      vm->page_shift
#endif

	if (va >= ALPHA_K0SEG_BASE && va <= ALPHA_K0SEG_END) {
		/*
		 * Direct-mapped address: just convert it.
		 */

		*pa = ALPHA_K0SEG_TO_PHYS(va);
		rv = cpu_kh->page_size - page_off;
	} else if (va >= ALPHA_K1SEG_BASE && va <= ALPHA_K1SEG_END) {
		/*
		 * Real kernel virtual address: do the translation.
		 */

		/* Find and read the L1 PTE. */
		pteoff = cpu_kh->lev1map_pa +
		    l1pte_index(va) * sizeof(alpha_pt_entry_t);
		if (_kvm_pread(kd, kd->pmfd, (char *)&pte, sizeof(pte),
		    (off_t)_kvm_pa2off(kd, pteoff)) != sizeof(pte)) {
			_kvm_syserr(kd, 0, "could not read L1 PTE");
			goto lose;
		}

		/* Find and read the L2 PTE. */
		if ((pte & ALPHA_PTE_VALID) == 0) {
			_kvm_err(kd, 0, "invalid translation (invalid L1 PTE)");
			goto lose;
		}
		pteoff = ALPHA_PTE_TO_PFN(pte) * cpu_kh->page_size +
		    l2pte_index(va) * sizeof(alpha_pt_entry_t);
		if (_kvm_pread(kd, kd->pmfd, (char *)&pte, sizeof(pte),
		    (off_t)_kvm_pa2off(kd, pteoff)) != sizeof(pte)) {
			_kvm_syserr(kd, 0, "could not read L2 PTE");
			goto lose;
		}

		/* Find and read the L3 PTE. */
		if ((pte & ALPHA_PTE_VALID) == 0) {
			_kvm_err(kd, 0, "invalid translation (invalid L2 PTE)");
			goto lose;
		}
		pteoff = ALPHA_PTE_TO_PFN(pte) * cpu_kh->page_size +
		    l3pte_index(va) * sizeof(alpha_pt_entry_t);
		if (_kvm_pread(kd, kd->pmfd, (char *)&pte, sizeof(pte),
		    (off_t)_kvm_pa2off(kd, pteoff)) != sizeof(pte)) {
			_kvm_syserr(kd, 0, "could not read L3 PTE");
			goto lose;
		}

		/* Fill in the PA. */
		if ((pte & ALPHA_PTE_VALID) == 0) {
			_kvm_err(kd, 0, "invalid translation (invalid L3 PTE)");
			goto lose;
		}
		*pa = ALPHA_PTE_TO_PFN(pte) * cpu_kh->page_size + page_off;
		rv = cpu_kh->page_size - page_off;
	} else {
		/*
		 * Bogus address (not in KV space): punt.
		 */

		_kvm_err(kd, 0, "invalid kernel virtual address");
lose:
		*pa = -1;
		rv = 0;
	}

	return (rv);
}
Example No. 16
/*
 * Translate a kernel virtual address to a physical address.
 */
int
_kvm_kvatop(kvm_t *kd, u_long va, paddr_t *pa)
{
	cpu_kcore_hdr_t *cpu_kh;
	paddr_t pde_pa, pte_pa;
	u_long page_off;
	pd_entry_t pde;
	pt_entry_t pte;

	if (ISALIVE(kd)) {
		_kvm_err(kd, 0, "vatop called in live kernel!");
		return (0);
	}

	page_off = va & (kd->nbpg - 1);

	if (va >= PMAP_DIRECT_BASE && va <= PMAP_DIRECT_END) {
		*pa = va - PMAP_DIRECT_BASE;
		return (int)(kd->nbpg - page_off);
	}

	cpu_kh = kd->cpu_data;

	/*
	 * Find and read all entries to get to the pa.
	 */

	/*
	 * Level 4.
	 */
	pde_pa = cpu_kh->ptdpaddr + (pl4_pi(va) * sizeof(pd_entry_t));
	if (pread(kd->pmfd, (void *)&pde, sizeof(pde),
	    _kvm_pa2off(kd, pde_pa)) != sizeof(pde)) {
		_kvm_syserr(kd, 0, "could not read PT level 4 entry");
		goto lose;
	}
	if ((pde & PG_V) == 0) {
		_kvm_err(kd, 0, "invalid translation (invalid level 4 PDE)");
		goto lose;
	}

	/*
	 * Level 3.
	 */
	pde_pa = (pde & PG_FRAME) + (pl3_pi(va) * sizeof(pd_entry_t));
	if (pread(kd->pmfd, (void *)&pde, sizeof(pde),
	    _kvm_pa2off(kd, pde_pa)) != sizeof(pde)) {
		_kvm_syserr(kd, 0, "could not read PT level 3 entry");
		goto lose;
	}
	if ((pde & PG_V) == 0) {
		_kvm_err(kd, 0, "invalid translation (invalid level 3 PDE)");
		goto lose;
	}

	/*
	 * Level 2.
	 */
	pde_pa = (pde & PG_FRAME) + (pl2_pi(va) * sizeof(pd_entry_t));
	if (pread(kd->pmfd, (void *)&pde, sizeof(pde),
	    _kvm_pa2off(kd, pde_pa)) != sizeof(pde)) {
		_kvm_syserr(kd, 0, "could not read PT level 2 entry");
		goto lose;
	}
	if ((pde & PG_V) == 0) {
		_kvm_err(kd, 0, "invalid translation (invalid level 2 PDE)");
		goto lose;
	}

	/*
	 * Might be a large page.
	 */
	if ((pde & PG_PS) != 0) {
		page_off = va & (NBPD_L2 - 1);
		*pa = (pde & PG_LGFRAME) | page_off;
		return (int)(NBPD_L2 - page_off);
	}

	/*
	 * Level 1.
	 */
	pte_pa = (pde & PG_FRAME) + (pl1_pi(va) * sizeof(pt_entry_t));
	if (pread(kd->pmfd, (void *) &pte, sizeof(pte),
	    _kvm_pa2off(kd, pte_pa)) != sizeof(pte)) {
		_kvm_syserr(kd, 0, "could not read PTE");
		goto lose;
	}
	/*
	 * Validate the PTE and return the physical address.
	 */
	if ((pte & PG_V) == 0) {
		_kvm_err(kd, 0, "invalid translation (invalid PTE)");
		goto lose;
	}
	*pa = (pte & PG_FRAME) + page_off;
	return (int)(kd->nbpg - page_off);

 lose:
	*pa = (u_long)~0L;
	return (0);
}
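The pl4_pi()/pl3_pi()/pl2_pi()/pl1_pi() helpers used above pull the per-level table index out of the virtual address; on amd64 each of the four levels indexes 512 eight-byte entries (9 bits per level over 4 KB pages). A sketch of what such macros typically look like, with the caveat that the authoritative definitions are the machine's pmap headers:

#include <stdint.h>

/* amd64 4-level layout: [ 9 | 9 | 9 | 9 | 12 ] = 48 significant bits. */
#define PI_MASK			0x1ffULL	/* 9 bits per level */
#define L4_INDEX(va)		(((uint64_t)(va) >> 39) & PI_MASK)
#define L3_INDEX(va)		(((uint64_t)(va) >> 30) & PI_MASK)
#define L2_INDEX(va)		(((uint64_t)(va) >> 21) & PI_MASK)
#define L1_INDEX(va)		(((uint64_t)(va) >> 12) & PI_MASK)
#define PAGE_OFFSET(va)		((uint64_t)(va) & 0xfffULL)

/* Byte offset of a level-N entry inside its 4 KB page-table page,
 * i.e. what gets added to the PG_FRAME of the previous level. */
#define ENTRY_OFFSET(idx)	((idx) * sizeof(uint64_t))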
Example No. 17
struct kinfo_proc2 *
kvm_getproc2(kvm_t *kd, int op, int arg, size_t esize, int *cnt)
{
	size_t size;
	int mib[6], st, nprocs;
	struct pstats pstats;

	if (ISSYSCTL(kd)) {
		size = 0;
		mib[0] = CTL_KERN;
		mib[1] = KERN_PROC2;
		mib[2] = op;
		mib[3] = arg;
		mib[4] = (int)esize;
again:
		mib[5] = 0;
		st = sysctl(mib, 6, NULL, &size, NULL, (size_t)0);
		if (st == -1) {
			_kvm_syserr(kd, kd->program, "kvm_getproc2");
			return (NULL);
		}

		mib[5] = (int) (size / esize);
		KVM_ALLOC(kd, procbase2, size);
		st = sysctl(mib, 6, kd->procbase2, &size, NULL, (size_t)0);
		if (st == -1) {
			if (errno == ENOMEM) {
				goto again;
			}
			_kvm_syserr(kd, kd->program, "kvm_getproc2");
			return (NULL);
		}
		nprocs = (int) (size / esize);
	} else {
		char *kp2c;
		struct kinfo_proc *kp;
		struct kinfo_proc2 kp2, *kp2p;
		struct kinfo_lwp *kl;
		int i, nlwps;

		kp = kvm_getprocs(kd, op, arg, &nprocs);
		if (kp == NULL)
			return (NULL);

		size = nprocs * esize;
		KVM_ALLOC(kd, procbase2, size);
		kp2c = (char *)(void *)kd->procbase2;
		kp2p = &kp2;
		for (i = 0; i < nprocs; i++, kp++) {
			struct timeval tv;

			kl = kvm_getlwps(kd, kp->kp_proc.p_pid,
			    (u_long)PTRTOUINT64(kp->kp_eproc.e_paddr),
			    sizeof(struct kinfo_lwp), &nlwps);

			if (kl == NULL) {
				_kvm_syserr(kd, NULL,
					"kvm_getlwps() failed on process %u\n",
					kp->kp_proc.p_pid);
				if (nlwps == 0)
					return NULL;
				else
					continue;
			}

			/* We use kl[0] as the "representative" LWP */
			memset(kp2p, 0, sizeof(kp2));
			kp2p->p_forw = kl[0].l_forw;
			kp2p->p_back = kl[0].l_back;
			kp2p->p_paddr = PTRTOUINT64(kp->kp_eproc.e_paddr);
			kp2p->p_addr = kl[0].l_addr;
			kp2p->p_fd = PTRTOUINT64(kp->kp_proc.p_fd);
			kp2p->p_cwdi = PTRTOUINT64(kp->kp_proc.p_cwdi);
			kp2p->p_stats = PTRTOUINT64(kp->kp_proc.p_stats);
			kp2p->p_limit = PTRTOUINT64(kp->kp_proc.p_limit);
			kp2p->p_vmspace = PTRTOUINT64(kp->kp_proc.p_vmspace);
			kp2p->p_sigacts = PTRTOUINT64(kp->kp_proc.p_sigacts);
			kp2p->p_sess = PTRTOUINT64(kp->kp_eproc.e_sess);
			kp2p->p_tsess = 0;
#if 1 /* XXX: dsl - p_ru was only ever non-zero for zombies */
			kp2p->p_ru = 0;
#else
			kp2p->p_ru = PTRTOUINT64(pstats.p_ru);
#endif

			kp2p->p_eflag = 0;
			kp2p->p_exitsig = kp->kp_proc.p_exitsig;
			kp2p->p_flag = kp->kp_proc.p_flag;

			kp2p->p_pid = kp->kp_proc.p_pid;

			kp2p->p_ppid = kp->kp_eproc.e_ppid;
			kp2p->p_sid = kp->kp_eproc.e_sid;
			kp2p->p__pgid = kp->kp_eproc.e_pgid;

			kp2p->p_tpgid = -1 /* XXX NO_PGID! */;

			kp2p->p_uid = kp->kp_eproc.e_ucred.cr_uid;
			kp2p->p_ruid = kp->kp_eproc.e_pcred.p_ruid;
			kp2p->p_svuid = kp->kp_eproc.e_pcred.p_svuid;
			kp2p->p_gid = kp->kp_eproc.e_ucred.cr_gid;
			kp2p->p_rgid = kp->kp_eproc.e_pcred.p_rgid;
			kp2p->p_svgid = kp->kp_eproc.e_pcred.p_svgid;

			/*CONSTCOND*/
			memcpy(kp2p->p_groups, kp->kp_eproc.e_ucred.cr_groups,
			    MIN(sizeof(kp2p->p_groups),
			    sizeof(kp->kp_eproc.e_ucred.cr_groups)));
			kp2p->p_ngroups = kp->kp_eproc.e_ucred.cr_ngroups;

			kp2p->p_jobc = kp->kp_eproc.e_jobc;
			kp2p->p_tdev = kp->kp_eproc.e_tdev;
			kp2p->p_tpgid = kp->kp_eproc.e_tpgid;
			kp2p->p_tsess = PTRTOUINT64(kp->kp_eproc.e_tsess);

			kp2p->p_estcpu = 0;
			bintime2timeval(&kp->kp_proc.p_rtime, &tv);
			kp2p->p_rtime_sec = (uint32_t)tv.tv_sec;
			kp2p->p_rtime_usec = (uint32_t)tv.tv_usec;
			kp2p->p_cpticks = kl[0].l_cpticks;
			kp2p->p_pctcpu = kp->kp_proc.p_pctcpu;
			kp2p->p_swtime = kl[0].l_swtime;
			kp2p->p_slptime = kl[0].l_slptime;
#if 0 /* XXX thorpej */
			kp2p->p_schedflags = kp->kp_proc.p_schedflags;
#else
			kp2p->p_schedflags = 0;
#endif

			kp2p->p_uticks = kp->kp_proc.p_uticks;
			kp2p->p_sticks = kp->kp_proc.p_sticks;
			kp2p->p_iticks = kp->kp_proc.p_iticks;

			kp2p->p_tracep = PTRTOUINT64(kp->kp_proc.p_tracep);
			kp2p->p_traceflag = kp->kp_proc.p_traceflag;

			kp2p->p_holdcnt = kl[0].l_holdcnt;

			memcpy(&kp2p->p_siglist,
			    &kp->kp_proc.p_sigpend.sp_set,
			    sizeof(ki_sigset_t));
			memset(&kp2p->p_sigmask, 0,
			    sizeof(ki_sigset_t));
			memcpy(&kp2p->p_sigignore,
			    &kp->kp_proc.p_sigctx.ps_sigignore,
			    sizeof(ki_sigset_t));
			memcpy(&kp2p->p_sigcatch,
			    &kp->kp_proc.p_sigctx.ps_sigcatch,
			    sizeof(ki_sigset_t));

			kp2p->p_stat = kl[0].l_stat;
			kp2p->p_priority = kl[0].l_priority;
			kp2p->p_usrpri = kl[0].l_priority;
			kp2p->p_nice = kp->kp_proc.p_nice;

			kp2p->p_xstat = kp->kp_proc.p_xstat;
			kp2p->p_acflag = kp->kp_proc.p_acflag;

			/*CONSTCOND*/
			strncpy(kp2p->p_comm, kp->kp_proc.p_comm,
			    MIN(sizeof(kp2p->p_comm),
			    sizeof(kp->kp_proc.p_comm)));

			strncpy(kp2p->p_wmesg, kp->kp_eproc.e_wmesg,
			    sizeof(kp2p->p_wmesg));
			kp2p->p_wchan = kl[0].l_wchan;
			strncpy(kp2p->p_login, kp->kp_eproc.e_login,
			    sizeof(kp2p->p_login));

			kp2p->p_vm_rssize = kp->kp_eproc.e_xrssize;
			kp2p->p_vm_tsize = kp->kp_eproc.e_vm.vm_tsize;
			kp2p->p_vm_dsize = kp->kp_eproc.e_vm.vm_dsize;
			kp2p->p_vm_ssize = kp->kp_eproc.e_vm.vm_ssize;
			kp2p->p_vm_vsize = kp->kp_eproc.e_vm.vm_map.size
			    / kd->nbpg;
			/* Adjust mapped size */
			kp2p->p_vm_msize =
			    (kp->kp_eproc.e_vm.vm_map.size / kd->nbpg) -
			    kp->kp_eproc.e_vm.vm_issize +
			    kp->kp_eproc.e_vm.vm_ssize;

			kp2p->p_eflag = (int32_t)kp->kp_eproc.e_flag;

			kp2p->p_realflag = kp->kp_proc.p_flag;
			kp2p->p_nlwps = kp->kp_proc.p_nlwps;
			kp2p->p_nrlwps = kp->kp_proc.p_nrlwps;
			kp2p->p_realstat = kp->kp_proc.p_stat;

			if (P_ZOMBIE(&kp->kp_proc) ||
			    kp->kp_proc.p_stats == NULL ||
			    KREAD(kd, (u_long)kp->kp_proc.p_stats, &pstats)) {
				kp2p->p_uvalid = 0;
			} else {
				kp2p->p_uvalid = 1;

				kp2p->p_ustart_sec = (u_int32_t)
				    pstats.p_start.tv_sec;
				kp2p->p_ustart_usec = (u_int32_t)
				    pstats.p_start.tv_usec;

				kp2p->p_uutime_sec = (u_int32_t)
				    pstats.p_ru.ru_utime.tv_sec;
				kp2p->p_uutime_usec = (u_int32_t)
				    pstats.p_ru.ru_utime.tv_usec;
				kp2p->p_ustime_sec = (u_int32_t)
				    pstats.p_ru.ru_stime.tv_sec;
				kp2p->p_ustime_usec = (u_int32_t)
				    pstats.p_ru.ru_stime.tv_usec;

				kp2p->p_uru_maxrss = pstats.p_ru.ru_maxrss;
				kp2p->p_uru_ixrss = pstats.p_ru.ru_ixrss;
				kp2p->p_uru_idrss = pstats.p_ru.ru_idrss;
				kp2p->p_uru_isrss = pstats.p_ru.ru_isrss;
				kp2p->p_uru_minflt = pstats.p_ru.ru_minflt;
				kp2p->p_uru_majflt = pstats.p_ru.ru_majflt;
				kp2p->p_uru_nswap = pstats.p_ru.ru_nswap;
				kp2p->p_uru_inblock = pstats.p_ru.ru_inblock;
				kp2p->p_uru_oublock = pstats.p_ru.ru_oublock;
				kp2p->p_uru_msgsnd = pstats.p_ru.ru_msgsnd;
				kp2p->p_uru_msgrcv = pstats.p_ru.ru_msgrcv;
				kp2p->p_uru_nsignals = pstats.p_ru.ru_nsignals;
				kp2p->p_uru_nvcsw = pstats.p_ru.ru_nvcsw;
				kp2p->p_uru_nivcsw = pstats.p_ru.ru_nivcsw;

				kp2p->p_uctime_sec = (u_int32_t)
				    (pstats.p_cru.ru_utime.tv_sec +
				    pstats.p_cru.ru_stime.tv_sec);
				kp2p->p_uctime_usec = (u_int32_t)
				    (pstats.p_cru.ru_utime.tv_usec +
				    pstats.p_cru.ru_stime.tv_usec);
			}

			memcpy(kp2c, &kp2, esize);
			kp2c += esize;
		}
	}
	*cnt = nprocs;
	return (kd->procbase2);
}
Example No. 18
struct kinfo_lwp *
kvm_getlwps(kvm_t *kd, int pid, u_long paddr, size_t esize, int *cnt)
{
	size_t size;
	int mib[5], nlwps;
	ssize_t st;
	struct kinfo_lwp *kl;

	if (ISSYSCTL(kd)) {
		size = 0;
		mib[0] = CTL_KERN;
		mib[1] = KERN_LWP;
		mib[2] = pid;
		mib[3] = (int)esize;
		mib[4] = 0;
again:
		st = sysctl(mib, 5, NULL, &size, NULL, (size_t)0);
		if (st == -1) {
			switch (errno) {
			case ESRCH: /* Treat this as a soft error; see kvm.c */
				_kvm_syserr(kd, NULL, "kvm_getlwps");
				return NULL;
			default:
				_kvm_syserr(kd, kd->program, "kvm_getlwps");
				return NULL;
			}
		}
		mib[4] = (int) (size / esize);
		KVM_ALLOC(kd, lwpbase, size);
		st = sysctl(mib, 5, kd->lwpbase, &size, NULL, (size_t)0);
		if (st == -1) {
			switch (errno) {
			case ESRCH: /* Treat this as a soft error; see kvm.c */
				_kvm_syserr(kd, NULL, "kvm_getlwps");
				return NULL;
			case ENOMEM:
				goto again;
			default:
				_kvm_syserr(kd, kd->program, "kvm_getlwps");
				return NULL;
			}
		}
		nlwps = (int) (size / esize);
	} else {
		/* grovel through the memory image */
		struct proc p;
		struct lwp l;
		u_long laddr;
		void *back;
		int i;

		st = kvm_read(kd, paddr, &p, sizeof(p));
		if (st == -1) {
			_kvm_syserr(kd, kd->program, "kvm_getlwps");
			return (NULL);
		}

		nlwps = p.p_nlwps;
		size = nlwps * sizeof(*kd->lwpbase);
		KVM_ALLOC(kd, lwpbase, size);
		laddr = (u_long)PTRTOUINT64(p.p_lwps.lh_first);
		for (i = 0; (i < nlwps) && (laddr != 0); i++) {
			st = kvm_read(kd, laddr, &l, sizeof(l));
			if (st == -1) {
				_kvm_syserr(kd, kd->program, "kvm_getlwps");
				return (NULL);
			}
			kl = &kd->lwpbase[i];
			kl->l_laddr = laddr;
			kl->l_forw = PTRTOUINT64(l.l_runq.tqe_next);
			laddr = (u_long)PTRTOUINT64(l.l_runq.tqe_prev);
			st = kvm_read(kd, laddr, &back, sizeof(back));
			if (st == -1) {
				_kvm_syserr(kd, kd->program, "kvm_getlwps");
				return (NULL);
			}
			kl->l_back = PTRTOUINT64(back);
			kl->l_addr = PTRTOUINT64(l.l_addr);
			kl->l_lid = l.l_lid;
			kl->l_flag = l.l_flag;
			kl->l_swtime = l.l_swtime;
			kl->l_slptime = l.l_slptime;
			kl->l_schedflags = 0; /* XXX */
			kl->l_holdcnt = 0;
			kl->l_priority = l.l_priority;
			kl->l_usrpri = l.l_priority;
			kl->l_stat = l.l_stat;
			kl->l_wchan = PTRTOUINT64(l.l_wchan);
			if (l.l_wmesg)
				(void)kvm_read(kd, (u_long)l.l_wmesg,
				    kl->l_wmesg, (size_t)WMESGLEN);
			kl->l_cpuid = KI_NOCPU;
			laddr = (u_long)PTRTOUINT64(l.l_sibling.le_next);
		}
	}

	*cnt = nlwps;
	return (kd->lwpbase);
}
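kvm_getlwps() is normally driven from kvm_getproc2() output, exactly the pairing the kvm_getproc2() implementation earlier on this page uses: the PID plus the process's kernel address (p_paddr) select which LWPs to fetch. A hedged sketch of that pairing on a NetBSD-style libkvm; the loop structure and output format are illustrative:

#include <sys/param.h>
#include <sys/sysctl.h>

#include <kvm.h>
#include <stdio.h>

/* Print the LWP count of every process; kd is an already-open kvm_t. */
static void
list_lwps(kvm_t *kd)
{
	struct kinfo_proc2 *kp2;
	struct kinfo_lwp *kl;
	int i, cnt, nlwps;

	kp2 = kvm_getproc2(kd, KERN_PROC_ALL, 0,
	    sizeof(struct kinfo_proc2), &cnt);
	if (kp2 == NULL)
		return;
	for (i = 0; i < cnt; i++) {
		kl = kvm_getlwps(kd, kp2[i].p_pid, (u_long)kp2[i].p_paddr,
		    sizeof(struct kinfo_lwp), &nlwps);
		if (kl == NULL)
			continue;	/* the process may have exited */
		printf("%5d %-16s %d lwp(s)\n",
		    (int)kp2[i].p_pid, kp2[i].p_comm, nlwps);
	}
}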
Example No. 19
struct kinfo_proc *
kvm_getprocs(kvm_t *kd, int op, int arg, int *cnt)
{
	int mib[4], st, nprocs;
	size_t size, osize;
	int temp_op;

	if (kd->procbase != 0) {
		free((void *)kd->procbase);
		/*
		 * Clear this pointer in case this call fails.  Otherwise,
		 * kvm_close() will free it again.
		 */
		kd->procbase = 0;
	}
	if (ISALIVE(kd)) {
		size = 0;
		mib[0] = CTL_KERN;
		mib[1] = KERN_PROC;
		mib[2] = op;
		mib[3] = arg;
		temp_op = op & ~KERN_PROC_INC_THREAD;
		st = sysctl(mib,
		    temp_op == KERN_PROC_ALL || temp_op == KERN_PROC_PROC ?
		    3 : 4, NULL, &size, NULL, 0);
		if (st == -1) {
			_kvm_syserr(kd, kd->program, "kvm_getprocs");
			return (0);
		}
		/*
		 * We can't continue with a size of 0 because we pass
		 * it to realloc() (via _kvm_realloc()), and passing 0
		 * to realloc() results in undefined behavior.
		 */
		if (size == 0) {
			/*
			 * XXX: We should probably return an invalid,
			 * but non-NULL, pointer here so any client
			 * program trying to dereference it will
			 * crash.  However, _kvm_freeprocs() calls
			 * free() on kd->procbase if it isn't NULL,
			 * and free()'ing a junk pointer isn't good.
			 * Then again, _kvm_freeprocs() isn't used
			 * anywhere . . .
			 */
			kd->procbase = _kvm_malloc(kd, 1);
			goto liveout;
		}
		do {
			size += size / 10;
			kd->procbase = (struct kinfo_proc *)
			    _kvm_realloc(kd, kd->procbase, size);
			if (kd->procbase == NULL)
				return (0);
			osize = size;
			st = sysctl(mib, temp_op == KERN_PROC_ALL ||
			    temp_op == KERN_PROC_PROC ? 3 : 4,
			    kd->procbase, &size, NULL, 0);
		} while (st == -1 && errno == ENOMEM && size == osize);
		if (st == -1) {
			_kvm_syserr(kd, kd->program, "kvm_getprocs");
			return (0);
		}
		/*
		 * We have to check the size again because sysctl()
		 * may "round up" oldlenp if oldp is NULL; hence it
		 * might've told us that there was data to get when
		 * there really isn't any.
		 */
		if (size > 0 &&
		    kd->procbase->ki_structsize != sizeof(struct kinfo_proc)) {
			_kvm_err(kd, kd->program,
			    "kinfo_proc size mismatch (expected %zu, got %d)",
			    sizeof(struct kinfo_proc),
			    kd->procbase->ki_structsize);
			return (0);
		}
liveout:
		nprocs = size == 0 ? 0 : size / kd->procbase->ki_structsize;
	} else {
		struct nlist nl[7], *p;

		nl[0].n_name = "_nprocs";
		nl[1].n_name = "_allproc";
		nl[2].n_name = "_zombproc";
		nl[3].n_name = "_ticks";
		nl[4].n_name = "_hz";
		nl[5].n_name = "_cpu_tick_frequency";
		nl[6].n_name = 0;

		if (!kd->arch->ka_native(kd)) {
			_kvm_err(kd, kd->program,
			    "cannot read procs from non-native core");
			return (0);
		}

		if (kvm_nlist(kd, nl) != 0) {
			for (p = nl; p->n_type != 0; ++p)
				;
			_kvm_err(kd, kd->program,
				 "%s: no such symbol", p->n_name);
			return (0);
		}
		if (KREAD(kd, nl[0].n_value, &nprocs)) {
			_kvm_err(kd, kd->program, "can't read nprocs");
			return (0);
		}
		if (KREAD(kd, nl[3].n_value, &ticks)) {
			_kvm_err(kd, kd->program, "can't read ticks");
			return (0);
		}
		if (KREAD(kd, nl[4].n_value, &hz)) {
			_kvm_err(kd, kd->program, "can't read hz");
			return (0);
		}
		if (KREAD(kd, nl[5].n_value, &cpu_tick_frequency)) {
			_kvm_err(kd, kd->program,
			    "can't read cpu_tick_frequency");
			return (0);
		}
		size = nprocs * sizeof(struct kinfo_proc);
		kd->procbase = (struct kinfo_proc *)_kvm_malloc(kd, size);
		if (kd->procbase == NULL)
			return (0);

		nprocs = kvm_deadprocs(kd, op, arg, nl[1].n_value,
				      nl[2].n_value, nprocs);
		if (nprocs <= 0) {
			_kvm_freeprocs(kd);
			nprocs = 0;
		}
#ifdef notdef
		else {
			size = nprocs * sizeof(struct kinfo_proc);
			kd->procbase = realloc(kd->procbase, size);
		}
#endif
	}
	*cnt = nprocs;
	return (kd->procbase);
}
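The ki_structsize check above guards against running a program built for one kinfo_proc layout against a kernel with another. A hedged usage sketch of this FreeBSD-flavoured interface as a complete little program; KERN_PROC_PROC (processes without their individual threads) is one common selector, and the returned array is owned by the kvm_t and released at kvm_close():

#include <sys/param.h>
#include <sys/sysctl.h>
#include <sys/user.h>

#include <fcntl.h>
#include <kvm.h>
#include <limits.h>
#include <stdio.h>

int
main(void)
{
	char errbuf[_POSIX2_LINE_MAX];
	struct kinfo_proc *kp;
	kvm_t *kd;
	int i, cnt;

	kd = kvm_openfiles(NULL, NULL, NULL, O_RDONLY, errbuf);
	if (kd == NULL) {
		fprintf(stderr, "kvm_openfiles: %s\n", errbuf);
		return (1);
	}
	kp = kvm_getprocs(kd, KERN_PROC_PROC, 0, &cnt);
	if (kp == NULL) {
		fprintf(stderr, "kvm_getprocs: %s\n", kvm_geterr(kd));
		kvm_close(kd);
		return (1);
	}
	for (i = 0; i < cnt; i++)
		printf("%5d %s\n", (int)kp[i].ki_pid, kp[i].ki_comm);
	kvm_close(kd);
	return (0);
}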