Example #1
int
_kvm_minidump_initvtop(kvm_t *kd)
{
	struct vmstate *vmst;
	off_t off;

	vmst = _kvm_malloc(kd, sizeof(*vmst));
	if (vmst == 0) {
		_kvm_err(kd, kd->program, "cannot allocate vm");
		return (-1);
	}
	kd->vmst = vmst;
	vmst->minidump = 1;
	if (pread(kd->pmfd, &vmst->hdr, sizeof(vmst->hdr), 0) !=
	    sizeof(vmst->hdr)) {
		_kvm_err(kd, kd->program, "cannot read dump header");
		return (-1);
	}
	if (strncmp(MINIDUMP_MAGIC, vmst->hdr.magic, sizeof(vmst->hdr.magic)) != 0) {
		_kvm_err(kd, kd->program, "not a minidump for this platform");
		return (-1);
	}
	if (vmst->hdr.version != MINIDUMP_VERSION) {
		_kvm_err(kd, kd->program, "wrong minidump version. expected %d got %d",
		    MINIDUMP_VERSION, vmst->hdr.version);
		return (-1);
	}

	/* Skip header and msgbuf */
	off = PAGE_SIZE + round_page(vmst->hdr.msgbufsize);

	vmst->bitmap = _kvm_malloc(kd, vmst->hdr.bitmapsize);
	if (vmst->bitmap == NULL) {
		_kvm_err(kd, kd->program, "cannot allocate %d bytes for bitmap", vmst->hdr.bitmapsize);
		return (-1);
	}
	if (pread(kd->pmfd, vmst->bitmap, vmst->hdr.bitmapsize, off) !=
	    (ssize_t)vmst->hdr.bitmapsize) {
		_kvm_err(kd, kd->program, "cannot read %d bytes for page bitmap", vmst->hdr.bitmapsize);
		return (-1);
	}
	off += round_page(vmst->hdr.bitmapsize);

	vmst->ptemap = _kvm_malloc(kd, vmst->hdr.ptesize);
	if (vmst->ptemap == NULL) {
		_kvm_err(kd, kd->program, "cannot allocate %d bytes for ptemap", vmst->hdr.ptesize);
		return (-1);
	}
	if (pread(kd->pmfd, vmst->ptemap, vmst->hdr.ptesize, off) !=
	    (ssize_t)vmst->hdr.ptesize) {
		_kvm_err(kd, kd->program, "cannot read %d bytes for ptemap", vmst->hdr.ptesize);
		return (-1);
	}
	off += vmst->hdr.ptesize;

	/* build physical address hash table for sparse pages */
	inithash(kd, vmst->bitmap, vmst->hdr.bitmapsize, off);

	return (0);
}
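The offset arithmetic in this example implies the minidump file layout that the other minidump examples below also follow: one page of header, the page-rounded message buffer, the page bitmap, the page-table (or page-map) region, and finally the dumped pages themselves. The helper below is only a sketch restating that arithmetic; the struct and function names are made up for illustration and are not part of libkvm, and PAGE_SIZE/round_page are the same machine macros the example relies on.

/*
 * Sketch only: md_hdr and md_offsets() are hypothetical names.  The
 * fields and the arithmetic mirror the header handling above.
 */
struct md_hdr {
	uint32_t msgbufsize;
	uint32_t bitmapsize;
	uint32_t ptesize;
};

static void
md_offsets(const struct md_hdr *hdr, off_t *bitmap_off, off_t *pte_off,
    off_t *sparse_off)
{
	off_t off;

	/* One page of header, then the page-rounded message buffer. */
	off = PAGE_SIZE + round_page(hdr->msgbufsize);
	*bitmap_off = off;
	off += round_page(hdr->bitmapsize);
	*pte_off = off;
	off += hdr->ptesize;	/* some ports round this up to a page too */
	*sparse_off = off;
}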
Example #2
static int
_sparc64_initvtop(kvm_t *kd)
{
	struct sparc64_dump_hdr hdr;
	struct sparc64_dump_reg	*regs;
	struct vmstate *vm;
	size_t regsz;
	uint64_t pa;
	int i;

	vm = (struct vmstate *)_kvm_malloc(kd, sizeof(*vm));
	if (vm == NULL) {
		_kvm_err(kd, kd->program, "cannot allocate vm");
		return (-1);
	}
	kd->vmst = vm;

	if (!_sparc64_read_phys(kd, 0, &hdr, sizeof(hdr)))
		goto fail_vm;
	hdr.dh_hdr_size = be64toh(hdr.dh_hdr_size);
	hdr.dh_tsb_pa = be64toh(hdr.dh_tsb_pa);
	hdr.dh_tsb_size = be64toh(hdr.dh_tsb_size);
	hdr.dh_tsb_mask = be64toh(hdr.dh_tsb_mask);
	hdr.dh_nregions = be32toh(hdr.dh_nregions);
	pa = hdr.dh_tsb_pa;

	regsz = hdr.dh_nregions * sizeof(*regs);
	regs = _kvm_malloc(kd, regsz);
	if (regs == NULL) {
		_kvm_err(kd, kd->program, "cannot allocate regions");
		goto fail_vm;
	}
	if (!_sparc64_read_phys(kd, sizeof(hdr), regs, regsz))
		goto fail_regs;
	for (i = 0; i < hdr.dh_nregions; i++) {
		regs[i].dr_pa = be64toh(regs[i].dr_pa);
		regs[i].dr_size = be64toh(regs[i].dr_size);
		regs[i].dr_offs = be64toh(regs[i].dr_offs);
	}
	qsort(regs, hdr.dh_nregions, sizeof(*regs), _sparc64_reg_cmp);

	vm->vm_tsb_mask = hdr.dh_tsb_mask;
	vm->vm_regions = regs;
	vm->vm_nregions = hdr.dh_nregions;
	vm->vm_tsb_off = _sparc64_find_off(vm, hdr.dh_tsb_pa, hdr.dh_tsb_size);
	if (vm->vm_tsb_off == KVM_OFF_NOTFOUND) {
		_kvm_err(kd, kd->program, "tsb not found in dump");
		goto fail_regs;
	}
	return (0);

fail_regs:
	free(regs);
fail_vm:
	free(vm);
	return (-1);
}
Example #3
int
_kvm_initvtop(kvm_t *kd)
{
	struct vmstate *vm;
	struct stat st;
	struct nlist nl[2];

	vm = (struct vmstate *)_kvm_malloc(kd, sizeof(*vm));
	if (vm == 0)
		return (-1);

	kd->vmst = vm;

	if (fstat(kd->pmfd, &st) < 0)
		return (-1);

	/* Get end of kernel address */
	nl[0].n_name = "_end";
	nl[1].n_name = 0;
	if (kvm_nlist(kd, nl) != 0) {
		_kvm_err(kd, kd->program, "pmap_stod: no such symbol");
		return (-1);
	}
	vm->end = (u_long)nl[0].n_value;

	return (0);
}
Example #4
int
_kvm_initvtop(kvm_t *kd)
{
    struct sparc64_dump_hdr hdr;
    struct sparc64_dump_reg	*regs;
    struct vmstate *vm;
    size_t regsz;
    vm_offset_t pa;

    vm = (struct vmstate *)_kvm_malloc(kd, sizeof(*vm));
    if (vm == NULL) {
        _kvm_err(kd, kd->program, "cannot allocate vm");
        return (-1);
    }
    kd->vmst = vm;

    if (!_kvm_read_phys(kd, 0, &hdr, sizeof(hdr)))
        goto fail_vm;
    pa = hdr.dh_tsb_pa;

    regsz = hdr.dh_nregions * sizeof(*regs);
    regs = _kvm_malloc(kd, regsz);
    if (regs == NULL) {
        _kvm_err(kd, kd->program, "cannot allocate regions");
        goto fail_vm;
    }
    if (!_kvm_read_phys(kd, sizeof(hdr), regs, regsz))
        goto fail_regs;
    qsort(regs, hdr.dh_nregions, sizeof(*regs), _kvm_reg_cmp);

    vm->vm_tsb_mask = hdr.dh_tsb_mask;
    vm->vm_regions = regs;
    vm->vm_nregions = hdr.dh_nregions;
    vm->vm_tsb_off = _kvm_find_off(vm, hdr.dh_tsb_pa, hdr.dh_tsb_size);
    if (vm->vm_tsb_off == KVM_OFF_NOTFOUND) {
        _kvm_err(kd, kd->program, "tsb not found in dump");
        goto fail_regs;
    }
    return (0);

fail_regs:
    free(regs);
fail_vm:
    free(vm);
    return (-1);
}
Example #5
/*
 * Build proc info array by reading in proc list from a crash dump.
 * We reallocate kd->procbase as necessary.
 */
static int
kvm_deadprocs(kvm_t *kd, int what, int arg, u_long a_allproc,
	      int allproc_hsize)
{
	struct kinfo_proc *bp;
	struct proc *p;
	struct proclist **pl;
	int cnt, partcnt, n;
	u_long nextoff;

	cnt = partcnt = 0;
	nextoff = 0;

	/*
	 * Dynamically allocate space for all the elements of the
	 * allprocs array and KREAD() them.
	 */
	pl = _kvm_malloc(kd, allproc_hsize * sizeof(struct proclist *));
	if (pl == NULL)
		return (-1);
	for (n = 0; n < allproc_hsize; n++) {
		pl[n] = _kvm_malloc(kd, sizeof(struct proclist));
		if (pl[n] == NULL) {
			free(pl);
			return (-1);
		}
		nextoff = a_allproc + (n * sizeof(struct proclist));
		if (KREAD(kd, (u_long)nextoff, pl[n])) {
			_kvm_err(kd, kd->program, "can't read proclist at 0x%lx",
				nextoff);
			free(pl[n]);
			free(pl);
			return (-1);
		}

		/* Ignore empty proclists; free the copy before moving on */
		if (LIST_EMPTY(pl[n])) {
			free(pl[n]);
			continue;
		}

		bp = kd->procbase + cnt;
		p = pl[n]->lh_first;
		partcnt = kvm_proclist(kd, what, arg, p, bp);
		if (partcnt < 0) {
			free(pl[n]);
			free(pl);
			return (partcnt);
		}

		cnt += partcnt;
		free(pl[n]);
	}
	free(pl);

	return (cnt);
}
Example #6
int
_kvm_initvtop(kvm_t *kd)
{
	struct vmstate *vm;
	struct nlist nl[4];
	struct uvmexp uvmexp;

	vm = (struct vmstate *)_kvm_malloc(kd, sizeof(*vm));
	if (vm == 0)
		return (-1);
	kd->vmst = vm;

	nl[0].n_name = "Sysmap";
	nl[1].n_name = "Sysmapsize";
	nl[2].n_name = "uvmexp";
	nl[3].n_name = 0;

	if (kvm_nlist(kd, nl) != 0) {
		_kvm_err(kd, kd->program, "bad namelist");
		return (-1);
	}
	if (KREAD(kd, (u_long)nl[0].n_value, &vm->Sysmap)) {
		_kvm_err(kd, kd->program, "cannot read Sysmap");
		return (-1);
	}
	if (KREAD(kd, (u_long)nl[1].n_value, &vm->Sysmapsize)) {
		_kvm_err(kd, kd->program, "cannot read Sysmapsize");
		return (-1);
	}
	/*
	 * We are only interested in the first three fields of struct
	 * uvmexp, so do not try to read more than necessary (especially
	 * in case the layout changes).
	 */
	if (kvm_read(kd, (u_long)nl[2].n_value, &uvmexp,
	    3 * sizeof(int)) != 3 * sizeof(int)) {
		_kvm_err(kd, kd->program, "cannot read uvmexp");
		return (-1);
	}
	vm->pagesize = uvmexp.pagesize;
	vm->pagemask = uvmexp.pagemask;
	vm->pageshift = uvmexp.pageshift;

	/*
	 * Older kernels might not have this symbol; in which case
	 * we use the value of VM_MIN_KERNEL_ADDRESS they must have.
	 */

	nl[0].n_name = "Sysmapbase";
	nl[1].n_name = 0;
	if (kvm_nlist(kd, nl) != 0 ||
	    KREAD(kd, (u_long)nl[0].n_value, &vm->Sysmapbase))
		vm->Sysmapbase = (vaddr_t)CKSSEG_BASE;

	return (0);
}
Example #7
int
_kvm_initvtop(kvm_t *kd)
{
	struct nlist nl[2];
	struct vmstate *vm;
	u_long pa;

	vm = (struct vmstate *)_kvm_malloc(kd, sizeof(*vm));
	if (vm == NULL)
		return (-1);
	kd->vmst = vm;

	vm->PTD = NULL;

	nl[0].n_name = "_PTDpaddr";
	nl[1].n_name = NULL;

	if (kvm_nlist(kd, nl) != 0) {
		_kvm_err(kd, kd->program, "bad namelist");
		return (-1);
	}

	if (_kvm_pread(kd, kd->pmfd, &pa, sizeof pa,
	    (off_t)_kvm_pa2off(kd, nl[0].n_value - KERNBASE)) != sizeof pa)
		goto invalid;

	vm->PTD = (pd_entry_t *)_kvm_malloc(kd, NBPG);
	if (vm->PTD == NULL)
		goto invalid;

	if (_kvm_pread(kd, kd->pmfd, vm->PTD, NBPG,
	    (off_t)_kvm_pa2off(kd, pa)) != NBPG)
		goto invalid;

	return (0);

invalid:
	if (vm->PTD != NULL) {
		free(vm->PTD);
		vm->PTD = NULL;
	}
	return (-1);
}
Example #8
static int
_powerpc64_initvtop(kvm_t *kd)
{

	kd->vmst = (struct vmstate *)_kvm_malloc(kd, sizeof(*kd->vmst));
	if (kd->vmst == NULL)
		return (-1);

	if (powerpc_maphdrs(kd) == -1)
		return (-1);

	return (0);
}
Example #9
int
_kvm_initvtop(kvm_t *kd)
{

	kd->vmst = (struct vmstate *)_kvm_malloc(kd, sizeof(*kd->vmst));
	if (kd->vmst == NULL) {
		_kvm_err(kd, kd->program, "out of virtual memory");
		return (-1);
	}
	if (powerpc_maphdrs(kd) == -1) {
		free(kd->vmst);
		kd->vmst = NULL;
		return (-1);
	}
	return (0);
}
Example #10
int
_kvm_initvtop(kvm_t *kd)
{
	cpu_kcore_hdr_t *cpu_kh;
	struct vmstate *vm;

	vm = (struct vmstate *)_kvm_malloc(kd, sizeof(*vm));
	if (vm == NULL)
		return (-1);

	cpu_kh = kd->cpu_data;

	/* Compute page_shift. */
	for (vm->page_shift = 0; (1L << vm->page_shift) < cpu_kh->page_size;
	    vm->page_shift++)
		/* nothing */ ;
	if ((1L << vm->page_shift) != cpu_kh->page_size) {
		free(vm);
		return (-1);
	}

	kd->vmst = vm;
	return (0);
}
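The loop in this example is an integer log2: for cpu_kh->page_size == 8192 it leaves page_shift at 13, and the follow-up check rejects any page size that is not a power of two. An equivalent sketch using the BSD fls(3) family is shown below; it is not code from the example and assumes flsl() is available on the target system.

#include <strings.h>

static int
page_size_to_shift(unsigned long page_size)
{
	int shift;

	if (page_size == 0)
		return (-1);
	shift = flsl((long)page_size) - 1;	/* index of the highest set bit */
	if ((1UL << shift) != page_size)
		return (-1);			/* not a power of two */
	return (shift);
}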
Example #11
/*
 * Get file structures.
 */
static int
kvm_deadfiles(kvm_t *kd, int op __unused, int arg __unused, long allproc_o,
    int nprocs __unused)
{
	struct proc proc;
	struct filedesc filed;
	int buflen = kd->arglen, ocnt = 0, n = 0, once = 0, i;
	struct file **ofiles = NULL;
	struct file *fp;
	struct proc *p;
	char *where = kd->argspc;

	if (buflen < (int)(sizeof(struct file *) + sizeof(struct file)))
		return (0);
	if (KREAD(kd, allproc_o, &p)) {
		_kvm_err(kd, kd->program, "cannot read allproc");
		return (0);
	}
	for (; p != NULL; p = LIST_NEXT(&proc, p_list)) {
		if (KREAD(kd, (u_long)p, &proc)) {
			_kvm_err(kd, kd->program, "can't read proc at %p", p);
			goto fail;
		}
		if (proc.p_state == PRS_NEW)
			continue;
		if (proc.p_fd == NULL)
			continue;
		if (KREAD(kd, (u_long)proc.p_fd, &filed)) {
			_kvm_err(kd, kd->program, "can't read filedesc at %p",
			    proc.p_fd);
			goto fail;
		}
		if (filed.fd_lastfile + 1 > ocnt) {
			ocnt = filed.fd_lastfile + 1;
			free(ofiles);
			ofiles = (struct file **)_kvm_malloc(kd,
				ocnt * sizeof(struct file *));
			if (ofiles == 0)
				return (0);
		}
		if (KREADN(kd, (u_long)filed.fd_ofiles, ofiles,
		    ocnt * sizeof(struct file *))) {
			_kvm_err(kd, kd->program, "can't read ofiles at %p",
			    filed.fd_ofiles);
			goto fail;
		}
		for (i = 0; i <= filed.fd_lastfile; i++) {
			if ((fp = ofiles[i]) == NULL)
				continue;
			/*
			 * copyout filehead (legacy)
			 */
			if (!once) {
				*(struct file **)kd->argspc = fp;
				*(struct file **)where = fp;
				buflen -= sizeof (fp);
				where += sizeof (fp);
				once = 1;
			}
			if (buflen < (int)sizeof(struct file))
				goto fail;
			if (KREAD(kd, (long)fp, ((struct file *)where))) {
				_kvm_err(kd, kd->program, "can't read kfp");
				goto fail;
			}
			buflen -= sizeof (struct file);
			fp = (struct file *)where;
			where += sizeof (struct file);
			n++;
		}
	}
	free(ofiles);
	return (n);
fail:
	free(ofiles);
	return (0);
	
}
Example #12
char *
kvm_getfiles(kvm_t *kd, int op, int arg, int *cnt)
{
	int mib[2], st, n, nfiles, nprocs;
	size_t size;

	_kvm_syserr(kd, kd->program, "kvm_getfiles has been broken for years");
	return (0);
	if (ISALIVE(kd)) {
		size = 0;
		mib[0] = CTL_KERN;
		mib[1] = KERN_FILE;
		st = sysctl(mib, 2, NULL, &size, NULL, 0);
		if (st == -1) {
			_kvm_syserr(kd, kd->program, "kvm_getfiles");
			return (0);
		}
		if (kd->argspc == 0)
			kd->argspc = (char *)_kvm_malloc(kd, size);
		else if (kd->arglen < (int)size)
			kd->argspc = (char *)_kvm_realloc(kd, kd->argspc, size);
		if (kd->argspc == 0)
			return (0);
		kd->arglen = size;
		st = sysctl(mib, 2, kd->argspc, &size, NULL, 0);
		if (st != 0) {
			_kvm_syserr(kd, kd->program, "kvm_getfiles");
			return (0);
		}
		nfiles = size / sizeof(struct xfile);
	} else {
		struct nlist nl[4], *p;

		nl[0].n_name = "_allproc";
		nl[1].n_name = "_nprocs";
		nl[2].n_name = "_nfiles";
		nl[3].n_name = 0;

		if (kvm_nlist(kd, nl) != 0) {
			for (p = nl; p->n_type != 0; ++p)
				;
			_kvm_err(kd, kd->program,
				 "%s: no such symbol", p->n_name);
			return (0);
		}
		if (KREAD(kd, nl[1].n_value, &nprocs)) {
			_kvm_err(kd, kd->program, "can't read nprocs");
			return (0);
		}
		if (KREAD(kd, nl[2].n_value, &nfiles)) {
			_kvm_err(kd, kd->program, "can't read nfiles");
			return (0);
		}
		size = sizeof(void *) + (nfiles + 10) * sizeof(struct file);
		if (kd->argspc == 0)
			kd->argspc = (char *)_kvm_malloc(kd, size);
		else if (kd->arglen < (int)size)
			kd->argspc = (char *)_kvm_realloc(kd, kd->argspc, size);
		if (kd->argspc == 0)
			return (0);
		kd->arglen = size;
		n = kvm_deadfiles(kd, op, arg, nl[0].n_value, nprocs);
		if (n != nfiles) {
			_kvm_err(kd, kd->program, "inconsistant nfiles");
			return (0);
		}
		nfiles = n;
	}
	*cnt = nfiles;
	return (kd->argspc);
}
Example #13
static char *
_kvm_ureadm(kvm_t *kd, const struct miniproc *p, u_long va, u_long *cnt)
{
	u_long addr, head;
	u_long offset;
	struct vm_map_entry vme;
	struct vm_amap amap;
	struct vm_anon *anonp, anon;
	struct vm_page pg;
	u_long slot;

	if (kd->swapspc == NULL) {
		kd->swapspc = _kvm_malloc(kd, (size_t)kd->nbpg);
		if (kd->swapspc == NULL)
			return (NULL);
	}

	/*
	 * Look through the address map for the memory object
	 * that corresponds to the given virtual address.
	 * The header just has the entire valid range.
	 */
	head = (u_long)&p->p_vmspace->vm_map.header;
	addr = head;
	for (;;) {
		if (KREAD(kd, addr, &vme))
			return (NULL);

		if (va >= vme.start && va < vme.end &&
		    vme.aref.ar_amap != NULL)
			break;

		addr = (u_long)vme.next;
		if (addr == head)
			return (NULL);
	}

	/*
	 * we found the map entry, now to find the object...
	 */
	if (vme.aref.ar_amap == NULL)
		return (NULL);

	addr = (u_long)vme.aref.ar_amap;
	if (KREAD(kd, addr, &amap))
		return (NULL);

	offset = va - vme.start;
	slot = offset / kd->nbpg + vme.aref.ar_pageoff;
	/* sanity-check slot number */
	if (slot > amap.am_nslot)
		return (NULL);

	addr = (u_long)amap.am_anon + (offset / kd->nbpg) * sizeof(anonp);
	if (KREAD(kd, addr, &anonp))
		return (NULL);

	addr = (u_long)anonp;
	if (KREAD(kd, addr, &anon))
		return (NULL);

	addr = (u_long)anon.an_page;
	if (addr) {
		if (KREAD(kd, addr, &pg))
			return (NULL);

		if (_kvm_pread(kd, kd->pmfd, kd->swapspc, (size_t)kd->nbpg,
		    (off_t)pg.phys_addr) != kd->nbpg)
			return (NULL);
	} else {
		if (kd->swfd < 0 ||
		    _kvm_pread(kd, kd->swfd, kd->swapspc, (size_t)kd->nbpg,
		    (off_t)(anon.an_swslot * kd->nbpg)) != kd->nbpg)
			return (NULL);
	}

	/* Found the page. */
	offset %= kd->nbpg;
	*cnt = kd->nbpg - offset;
	return (&kd->swapspc[(size_t)offset]);
}
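_kvm_ureadm() above returns a pointer into kd->swapspc and, through *cnt, how many bytes are valid from va to the end of that page, so a caller that wants more than one page assembles the result chunk by chunk. The loop below is only a sketch of such a caller; the wrapper name and its signature are assumptions, not code from this library.

#include <string.h>

static ssize_t
ureadm_copy(kvm_t *kd, const struct miniproc *p, u_long uva, void *buf,
    size_t len)
{
	char *dst = buf;
	size_t resid = len;
	u_long cnt;
	char *src;

	while (resid > 0) {
		src = _kvm_ureadm(kd, p, uva, &cnt);
		if (src == NULL)
			return (-1);		/* address not mapped or swapped */
		if (cnt > resid)
			cnt = resid;
		memcpy(dst, src, cnt);		/* copy what this page provides */
		dst += cnt;
		uva += cnt;
		resid -= cnt;
	}
	return ((ssize_t)len);
}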
Example #14
struct kinfo_proc *
kvm_getprocs(kvm_t *kd, int op, int arg, int *cnt)
{
	int mib[4], st, nprocs;
	size_t size;

	if (kd->procbase != 0) {
		free((void *)kd->procbase);
		/*
		 * Clear this pointer in case this call fails.  Otherwise,
		 * kvm_close() will free it again.
		 */
		kd->procbase = 0;
	}
	if (ISALIVE(kd)) {
		size = 0;
		mib[0] = CTL_KERN;
		mib[1] = KERN_PROC;
		mib[2] = op;
		mib[3] = arg;
		st = sysctl(mib, 4, NULL, &size, NULL, 0);
		if (st == -1) {
			_kvm_syserr(kd, kd->program, "kvm_getprocs");
			return (0);
		}
		kd->procbase = _kvm_malloc(kd, size);
		if (kd->procbase == 0)
			return (0);
		st = sysctl(mib, 4, kd->procbase, &size, NULL, 0);
		if (st == -1) {
			_kvm_syserr(kd, kd->program, "kvm_getprocs");
			return (0);
		}
		if (size % sizeof(struct kinfo_proc) != 0) {
			_kvm_err(kd, kd->program,
			    "proc size mismatch (%d total, %d chunks)",
			    size, sizeof(struct kinfo_proc));
			return (0);
		}
		nprocs = size / sizeof(struct kinfo_proc);
	} else {
		struct nlist nl[4], *p;

		memset(nl, 0, sizeof(nl));
		nl[0].n_name = "_nprocs";
		nl[1].n_name = "_allproc";
		nl[2].n_name = "_zombproc";
		nl[3].n_name = NULL;

		if (kvm_nlist(kd, nl) != 0) {
			for (p = nl; p->n_type != 0; ++p)
				;
			_kvm_err(kd, kd->program,
			    "%s: no such symbol", p->n_name);
			return (0);
		}
		if (KREAD(kd, nl[0].n_value, &nprocs)) {
			_kvm_err(kd, kd->program, "can't read nprocs");
			return (0);
		}
		size = nprocs * sizeof(struct kinfo_proc);
		kd->procbase = _kvm_malloc(kd, size);
		if (kd->procbase == 0)
			return (0);

		nprocs = kvm_deadprocs(kd, op, arg, nl[1].n_value,
		    nl[2].n_value, nprocs);
#ifdef notdef
		size = nprocs * sizeof(struct kinfo_proc);
		(void)realloc(kd->procbase, size);
#endif
	}
	*cnt = nprocs;
	return (kd->procbase);
}
Example #15
char *
kvm_getfiles(kvm_t *kd, int op, int arg, int *cnt)
{
	int mib[2], st, nfiles;
	size_t size;
	struct file *fp, *fplim;
	struct filelist filehead;

	if (kvm_ishost(kd)) {
		size = 0;
		mib[0] = CTL_KERN;
		mib[1] = KERN_FILE;
		st = sysctl(mib, 2, NULL, &size, NULL, 0);
		if (st == -1) {
			_kvm_syserr(kd, kd->program, "kvm_getfiles");
			return (0);
		}
		if (kd->argspc == 0)
			kd->argspc = (char *)_kvm_malloc(kd, size);
		else if (kd->arglen < size)
			kd->argspc = (char *)_kvm_realloc(kd, kd->argspc, size);
		if (kd->argspc == 0)
			return (0);
		kd->arglen = size;
		st = sysctl(mib, 2, kd->argspc, &size, NULL, 0);
		if (st == -1 || size < sizeof(filehead)) {
			_kvm_syserr(kd, kd->program, "kvm_getfiles");
			return (0);
		}
		filehead = *(struct filelist *)kd->argspc;
		fp = (struct file *)(kd->argspc + sizeof (filehead));
		fplim = (struct file *)(kd->argspc + size);
		for (nfiles = 0; filehead.lh_first && (fp < fplim); nfiles++, fp++)
			filehead.lh_first = fp->f_list.le_next;
	} else {
		struct nlist nl[3], *p;

		nl[0].n_name = "_filehead";
		nl[1].n_name = "_nfiles";
		nl[2].n_name = 0;

		if (kvm_nlist(kd, nl) != 0) {
			for (p = nl; p->n_type != 0; ++p)
				;
			_kvm_err(kd, kd->program,
				 "%s: no such symbol", p->n_name);
			return (0);
		}
		if (KREAD(kd, nl[0].n_value, &nfiles)) {
			_kvm_err(kd, kd->program, "can't read nfiles");
			return (0);
		}
		size = sizeof(filehead) + (nfiles + 10) * sizeof(struct file);
		if (kd->argspc == 0)
			kd->argspc = (char *)_kvm_malloc(kd, size);
		else if (kd->arglen < size)
			kd->argspc = (char *)_kvm_realloc(kd, kd->argspc, size);
		if (kd->argspc == 0)
			return (0);
		kd->arglen = size;
		nfiles = kvm_deadfiles(kd, op, arg, nl[1].n_value, nfiles);
		if (nfiles == 0)
			return (0);
	}
	*cnt = nfiles;
	return (kd->argspc);
}
Example #16
static int
_mips_minidump_initvtop(kvm_t *kd)
{
	struct vmstate *vmst;
	uint32_t *bitmap;
	off_t off;

	vmst = _kvm_malloc(kd, sizeof(*vmst));
	if (vmst == NULL) {
		_kvm_err(kd, kd->program, "cannot allocate vm");
		return (-1);
	}

	kd->vmst = vmst;

	if (kd->nlehdr.e_ident[EI_CLASS] == ELFCLASS64 ||
	    kd->nlehdr.e_flags & EF_MIPS_ABI2)
		vmst->pte_size = 64;
	else
		vmst->pte_size = 32;

	if (pread(kd->pmfd, &vmst->hdr,
	    sizeof(vmst->hdr), 0) != sizeof(vmst->hdr)) {
		_kvm_err(kd, kd->program, "cannot read dump header");
		return (-1);
	}

	if (strncmp(MINIDUMP_MAGIC, vmst->hdr.magic,
	    sizeof(vmst->hdr.magic)) != 0) {
		_kvm_err(kd, kd->program, "not a minidump for this platform");
		return (-1);
	}
	vmst->hdr.version = _kvm32toh(kd, vmst->hdr.version);
	if (vmst->hdr.version != MINIDUMP_VERSION) {
		_kvm_err(kd, kd->program, "wrong minidump version. "
		    "Expected %d got %d", MINIDUMP_VERSION, vmst->hdr.version);
		return (-1);
	}
	vmst->hdr.msgbufsize = _kvm32toh(kd, vmst->hdr.msgbufsize);
	vmst->hdr.bitmapsize = _kvm32toh(kd, vmst->hdr.bitmapsize);
	vmst->hdr.ptesize = _kvm32toh(kd, vmst->hdr.ptesize);
	vmst->hdr.kernbase = _kvm64toh(kd, vmst->hdr.kernbase);
	vmst->hdr.dmapbase = _kvm64toh(kd, vmst->hdr.dmapbase);
	vmst->hdr.dmapend = _kvm64toh(kd, vmst->hdr.dmapend);

	/* Skip header and msgbuf */
	off = MIPS_PAGE_SIZE + mips_round_page(vmst->hdr.msgbufsize);

	bitmap = _kvm_malloc(kd, vmst->hdr.bitmapsize);
	if (bitmap == NULL) {
		_kvm_err(kd, kd->program, "cannot allocate %d bytes for "
		    "bitmap", vmst->hdr.bitmapsize);
		return (-1);
	}

	if (pread(kd->pmfd, bitmap, vmst->hdr.bitmapsize, off) !=
	    (ssize_t)vmst->hdr.bitmapsize) {
		_kvm_err(kd, kd->program, "cannot read %d bytes for page bitmap",
		    vmst->hdr.bitmapsize);
		free(bitmap);
		return (-1);
	}
	off += mips_round_page(vmst->hdr.bitmapsize);

	vmst->ptemap = _kvm_malloc(kd, vmst->hdr.ptesize);
	if (vmst->ptemap == NULL) {
		_kvm_err(kd, kd->program, "cannot allocate %d bytes for "
		    "ptemap", vmst->hdr.ptesize);
		free(bitmap);
		return (-1);
	}

	if (pread(kd->pmfd, vmst->ptemap, vmst->hdr.ptesize, off) !=
	    (ssize_t)vmst->hdr.ptesize) {
		_kvm_err(kd, kd->program, "cannot read %d bytes for ptemap",
		    vmst->hdr.ptesize);
		free(bitmap);
		return (-1);
	}

	off += vmst->hdr.ptesize;

	/* Build physical address hash table for sparse pages */
	_kvm_hpt_init(kd, &vmst->hpt, bitmap, vmst->hdr.bitmapsize, off,
	    MIPS_PAGE_SIZE, sizeof(*bitmap));
	free(bitmap);

	return (0);
}
Example #17
struct kinfo_proc *
kvm_getprocs(kvm_t *kd, int op, int arg, int *cnt)
{
	int mib[4], st, nprocs;
	size_t size, osize;
	int temp_op;

	if (kd->procbase != 0) {
		free((void *)kd->procbase);
		/*
		 * Clear this pointer in case this call fails.  Otherwise,
		 * kvm_close() will free it again.
		 */
		kd->procbase = 0;
	}
	if (ISALIVE(kd)) {
		size = 0;
		mib[0] = CTL_KERN;
		mib[1] = KERN_PROC;
		mib[2] = op;
		mib[3] = arg;
		temp_op = op & ~KERN_PROC_INC_THREAD;
		st = sysctl(mib,
		    temp_op == KERN_PROC_ALL || temp_op == KERN_PROC_PROC ?
		    3 : 4, NULL, &size, NULL, 0);
		if (st == -1) {
			_kvm_syserr(kd, kd->program, "kvm_getprocs");
			return (0);
		}
		/*
		 * We can't continue with a size of 0 because we pass
		 * it to realloc() (via _kvm_realloc()), and passing 0
		 * to realloc() results in undefined behavior.
		 */
		if (size == 0) {
			/*
			 * XXX: We should probably return an invalid,
			 * but non-NULL, pointer here so any client
			 * program trying to dereference it will
			 * crash.  However, _kvm_freeprocs() calls
			 * free() on kd->procbase if it isn't NULL,
			 * and free()'ing a junk pointer isn't good.
			 * Then again, _kvm_freeprocs() isn't used
			 * anywhere . . .
			 */
			kd->procbase = _kvm_malloc(kd, 1);
			goto liveout;
		}
		do {
			size += size / 10;
			kd->procbase = (struct kinfo_proc *)
			    _kvm_realloc(kd, kd->procbase, size);
			if (kd->procbase == NULL)
				return (0);
			osize = size;
			st = sysctl(mib, temp_op == KERN_PROC_ALL ||
			    temp_op == KERN_PROC_PROC ? 3 : 4,
			    kd->procbase, &size, NULL, 0);
		} while (st == -1 && errno == ENOMEM && size == osize);
		if (st == -1) {
			_kvm_syserr(kd, kd->program, "kvm_getprocs");
			return (0);
		}
		/*
		 * We have to check the size again because sysctl()
		 * may "round up" oldlenp if oldp is NULL; hence it
		 * might've told us that there was data to get when
		 * there really isn't any.
		 */
		if (size > 0 &&
		    kd->procbase->ki_structsize != sizeof(struct kinfo_proc)) {
			_kvm_err(kd, kd->program,
			    "kinfo_proc size mismatch (expected %zu, got %d)",
			    sizeof(struct kinfo_proc),
			    kd->procbase->ki_structsize);
			return (0);
		}
liveout:
		nprocs = size == 0 ? 0 : size / kd->procbase->ki_structsize;
	} else {
		struct nlist nl[7], *p;

		nl[0].n_name = "_nprocs";
		nl[1].n_name = "_allproc";
		nl[2].n_name = "_zombproc";
		nl[3].n_name = "_ticks";
		nl[4].n_name = "_hz";
		nl[5].n_name = "_cpu_tick_frequency";
		nl[6].n_name = 0;

		if (!kd->arch->ka_native(kd)) {
			_kvm_err(kd, kd->program,
			    "cannot read procs from non-native core");
			return (0);
		}

		if (kvm_nlist(kd, nl) != 0) {
			for (p = nl; p->n_type != 0; ++p)
				;
			_kvm_err(kd, kd->program,
				 "%s: no such symbol", p->n_name);
			return (0);
		}
		if (KREAD(kd, nl[0].n_value, &nprocs)) {
			_kvm_err(kd, kd->program, "can't read nprocs");
			return (0);
		}
		if (KREAD(kd, nl[3].n_value, &ticks)) {
			_kvm_err(kd, kd->program, "can't read ticks");
			return (0);
		}
		if (KREAD(kd, nl[4].n_value, &hz)) {
			_kvm_err(kd, kd->program, "can't read hz");
			return (0);
		}
		if (KREAD(kd, nl[5].n_value, &cpu_tick_frequency)) {
			_kvm_err(kd, kd->program,
			    "can't read cpu_tick_frequency");
			return (0);
		}
		size = nprocs * sizeof(struct kinfo_proc);
		kd->procbase = (struct kinfo_proc *)_kvm_malloc(kd, size);
		if (kd->procbase == NULL)
			return (0);

		nprocs = kvm_deadprocs(kd, op, arg, nl[1].n_value,
				      nl[2].n_value, nprocs);
		if (nprocs <= 0) {
			_kvm_freeprocs(kd);
			nprocs = 0;
		}
#ifdef notdef
		else {
			size = nprocs * sizeof(struct kinfo_proc);
			kd->procbase = realloc(kd->procbase, size);
		}
#endif
	}
	*cnt = nprocs;
	return (kd->procbase);
}
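For context, a minimal caller of the four-argument kvm_getprocs() shown here and in Example #14 might look like the sketch below (FreeBSD-style libkvm assumed; error handling kept short).

#include <sys/param.h>
#include <sys/sysctl.h>
#include <sys/user.h>
#include <fcntl.h>
#include <kvm.h>
#include <limits.h>
#include <stdio.h>

int
main(void)
{
	char errbuf[_POSIX2_LINE_MAX];
	struct kinfo_proc *kp;
	kvm_t *kd;
	int i, cnt;

	/* NULL, NULL, NULL: examine the running kernel. */
	kd = kvm_openfiles(NULL, NULL, NULL, O_RDONLY, errbuf);
	if (kd == NULL) {
		fprintf(stderr, "kvm_openfiles: %s\n", errbuf);
		return (1);
	}
	kp = kvm_getprocs(kd, KERN_PROC_PROC, 0, &cnt);
	for (i = 0; kp != NULL && i < cnt; i++)
		printf("%5d %s\n", kp[i].ki_pid, kp[i].ki_comm);
	kvm_close(kd);
	return (0);
}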
Example #18
int
_kvm_initvtop(kvm_t *kd)
{
	struct vmstate *vm;
	struct nlist nl[2];
	u_long kernbase, physaddr, pa;
	pd_entry_t *l1pt;
	Elf32_Ehdr *ehdr;
	size_t hdrsz;
	char minihdr[8];

	if (!kd->rawdump) {
		if (pread(kd->pmfd, &minihdr, 8, 0) == 8) {
			if (memcmp(&minihdr, "minidump", 8) == 0)
				return (_kvm_minidump_initvtop(kd));
		} else {
			_kvm_err(kd, kd->program, "cannot read header");
			return (-1);
		}
	}

	vm = _kvm_malloc(kd, sizeof(*vm));
	if (vm == 0) {
		_kvm_err(kd, kd->program, "cannot allocate vm");
		return (-1);
	}
	kd->vmst = vm;
	vm->l1pt = NULL;
	if (_kvm_maphdrs(kd, sizeof(Elf32_Ehdr)) == -1)
		return (-1);
	ehdr = kd->vmst->mmapbase;
	hdrsz = ehdr->e_phoff + ehdr->e_phentsize * ehdr->e_phnum;
	if (_kvm_maphdrs(kd, hdrsz) == -1)
		return (-1);
	nl[0].n_name = "kernbase";
	nl[1].n_name = NULL;
	if (kvm_nlist(kd, nl) != 0)
		kernbase = KERNBASE;
	else
		kernbase = nl[0].n_value;

	nl[0].n_name = "physaddr";
	if (kvm_nlist(kd, nl) != 0) {
		_kvm_err(kd, kd->program, "couldn't get phys addr");
		return (-1);
	}
	physaddr = nl[0].n_value;
	nl[0].n_name = "kernel_l1pa";
	if (kvm_nlist(kd, nl) != 0) {
		_kvm_err(kd, kd->program, "bad namelist");
		return (-1);
	}
	if (kvm_read(kd, (nl[0].n_value - kernbase + physaddr), &pa,
	    sizeof(pa)) != sizeof(pa)) {
		_kvm_err(kd, kd->program, "cannot read kernel_l1pa");
		return (-1);
	}
	l1pt = _kvm_malloc(kd, L1_TABLE_SIZE);
	if (l1pt == NULL) {
		_kvm_err(kd, kd->program, "cannot allocate l1pt");
		return (-1);
	}
	if (kvm_read(kd, pa, l1pt, L1_TABLE_SIZE) != L1_TABLE_SIZE) {
		_kvm_err(kd, kd->program, "cannot read l1pt");
		free(l1pt);
		return (-1);
	}
	vm->l1pt = l1pt;
	return 0;
}
Example #19
static int
_arm_minidump_initvtop(kvm_t *kd)
{
	struct vmstate *vmst;
	off_t off, sparse_off;

	vmst = _kvm_malloc(kd, sizeof(*vmst));
	if (vmst == NULL) {
		_kvm_err(kd, kd->program, "cannot allocate vm");
		return (-1);
	}

	kd->vmst = vmst;

	if (pread(kd->pmfd, &vmst->hdr,
	    sizeof(vmst->hdr), 0) != sizeof(vmst->hdr)) {
		_kvm_err(kd, kd->program, "cannot read dump header");
		return (-1);
	}

	if (strncmp(MINIDUMP_MAGIC, vmst->hdr.magic,
	    sizeof(vmst->hdr.magic)) != 0) {
		_kvm_err(kd, kd->program, "not a minidump for this platform");
		return (-1);
	}
	vmst->hdr.version = _kvm32toh(kd, vmst->hdr.version);
	if (vmst->hdr.version != MINIDUMP_VERSION) {
		_kvm_err(kd, kd->program, "wrong minidump version. "
		    "Expected %d got %d", MINIDUMP_VERSION, vmst->hdr.version);
		return (-1);
	}
	vmst->hdr.msgbufsize = _kvm32toh(kd, vmst->hdr.msgbufsize);
	vmst->hdr.bitmapsize = _kvm32toh(kd, vmst->hdr.bitmapsize);
	vmst->hdr.ptesize = _kvm32toh(kd, vmst->hdr.ptesize);
	vmst->hdr.kernbase = _kvm32toh(kd, vmst->hdr.kernbase);
	vmst->hdr.arch = _kvm32toh(kd, vmst->hdr.arch);
	vmst->hdr.mmuformat = _kvm32toh(kd, vmst->hdr.mmuformat);
	if (vmst->hdr.mmuformat == MINIDUMP_MMU_FORMAT_UNKNOWN) {
		/* This is a safe default as 1K pages are not used. */
		vmst->hdr.mmuformat = MINIDUMP_MMU_FORMAT_V6;
	}

	/* Skip header and msgbuf */
	off = ARM_PAGE_SIZE + arm_round_page(vmst->hdr.msgbufsize);

	sparse_off = off + arm_round_page(vmst->hdr.bitmapsize) +
	    arm_round_page(vmst->hdr.ptesize);
	if (_kvm_pt_init(kd, vmst->hdr.bitmapsize, off, sparse_off,
	    ARM_PAGE_SIZE, sizeof(uint32_t)) == -1) {
		_kvm_err(kd, kd->program, "cannot load core bitmap");
		return (-1);
	}
	off += arm_round_page(vmst->hdr.bitmapsize);

	vmst->ptemap = _kvm_malloc(kd, vmst->hdr.ptesize);
	if (vmst->ptemap == NULL) {
		_kvm_err(kd, kd->program, "cannot allocate %d bytes for "
		    "ptemap", vmst->hdr.ptesize);
		return (-1);
	}

	if (pread(kd->pmfd, vmst->ptemap, vmst->hdr.ptesize, off) !=
	    (ssize_t)vmst->hdr.ptesize) {
		_kvm_err(kd, kd->program, "cannot read %d bytes for ptemap",
		    vmst->hdr.ptesize);
		return (-1);
	}
	off += arm_round_page(vmst->hdr.ptesize);

	return (0);
}
Example #20
static int
_amd64_minidump_initvtop(kvm_t *kd)
{
	struct vmstate *vmst;
	off_t off, sparse_off;

	vmst = _kvm_malloc(kd, sizeof(*vmst));
	if (vmst == NULL) {
		_kvm_err(kd, kd->program, "cannot allocate vm");
		return (-1);
	}
	kd->vmst = vmst;
	if (pread(kd->pmfd, &vmst->hdr, sizeof(vmst->hdr), 0) !=
	    sizeof(vmst->hdr)) {
		_kvm_err(kd, kd->program, "cannot read dump header");
		return (-1);
	}
	if (strncmp(MINIDUMP_MAGIC, vmst->hdr.magic, sizeof(vmst->hdr.magic)) != 0) {
		_kvm_err(kd, kd->program, "not a minidump for this platform");
		return (-1);
	}

	/*
	 * NB: amd64 minidump header is binary compatible between version 1
	 * and version 2; this may not be the case for the future versions.
	 */
	vmst->hdr.version = le32toh(vmst->hdr.version);
	if (vmst->hdr.version != MINIDUMP_VERSION && vmst->hdr.version != 1) {
		_kvm_err(kd, kd->program, "wrong minidump version. expected %d got %d",
		    MINIDUMP_VERSION, vmst->hdr.version);
		return (-1);
	}
	vmst->hdr.msgbufsize = le32toh(vmst->hdr.msgbufsize);
	vmst->hdr.bitmapsize = le32toh(vmst->hdr.bitmapsize);
	vmst->hdr.pmapsize = le32toh(vmst->hdr.pmapsize);
	vmst->hdr.kernbase = le64toh(vmst->hdr.kernbase);
	vmst->hdr.dmapbase = le64toh(vmst->hdr.dmapbase);
	vmst->hdr.dmapend = le64toh(vmst->hdr.dmapend);

	/* Skip header and msgbuf */
	off = AMD64_PAGE_SIZE + amd64_round_page(vmst->hdr.msgbufsize);

	sparse_off = off + amd64_round_page(vmst->hdr.bitmapsize) +
	    amd64_round_page(vmst->hdr.pmapsize);
	if (_kvm_pt_init(kd, vmst->hdr.bitmapsize, off, sparse_off,
	    AMD64_PAGE_SIZE, sizeof(uint64_t)) == -1) {
		_kvm_err(kd, kd->program, "cannot load core bitmap");
		return (-1);
	}
	off += amd64_round_page(vmst->hdr.bitmapsize);

	vmst->page_map = _kvm_malloc(kd, vmst->hdr.pmapsize);
	if (vmst->page_map == NULL) {
		_kvm_err(kd, kd->program, "cannot allocate %d bytes for page_map",
		    vmst->hdr.pmapsize);
		return (-1);
	}
	if (pread(kd->pmfd, vmst->page_map, vmst->hdr.pmapsize, off) !=
	    (ssize_t)vmst->hdr.pmapsize) {
		_kvm_err(kd, kd->program, "cannot read %d bytes for page_map",
		    vmst->hdr.pmapsize);
		return (-1);
	}
	off += amd64_round_page(vmst->hdr.pmapsize);

	return (0);
}
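The bitmap loaded by _kvm_pt_init() above has one bit per physical page; a set bit means the page was dumped, and dumped pages are stored back to back in the sparse region in bitmap order. The function below is only a sketch of the lookup that layout implies, using a hypothetical helper and a naive rescan; the real _kvm_pt_init() builds a lookup table instead.

/*
 * Sketch only: translate a physical page number to its file offset by
 * counting the set bits that precede it in the dump bitmap (64-bit
 * bitmap words, as in the amd64 example above).
 */
static off_t
sparse_page_off(const uint64_t *bitmap, size_t nbits, uint64_t pfn,
    off_t sparse_off, size_t page_size)
{
	uint64_t idx = 0;
	uint64_t i;

	if (pfn >= nbits || (bitmap[pfn / 64] & (1ULL << (pfn % 64))) == 0)
		return (-1);		/* page was not dumped */
	for (i = 0; i < pfn; i++)
		if (bitmap[i / 64] & (1ULL << (i % 64)))
			idx++;		/* pages stored before this one */
	return (sparse_off + (off_t)idx * (off_t)page_size);
}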
Example #21
struct kinfo_proc2 *
kvm_getproc2(kvm_t *kd, int op, int arg, size_t esize, int *cnt)
{
	int mib[6], st, nprocs;
	struct user user;
	size_t size;

	if ((ssize_t)esize < 0)
		return (NULL);

	if (kd->procbase2 != NULL) {
		free(kd->procbase2);
		/*
		 * Clear this pointer in case this call fails.  Otherwise,
		 * kvm_close() will free it again.
		 */
		kd->procbase2 = 0;
	}

	if (ISALIVE(kd)) {
		size = 0;
		mib[0] = CTL_KERN;
		mib[1] = KERN_PROC2;
		mib[2] = op;
		mib[3] = arg;
		mib[4] = esize;
		mib[5] = 0;
		st = sysctl(mib, 6, NULL, &size, NULL, 0);
		if (st == -1) {
			_kvm_syserr(kd, kd->program, "kvm_getproc2");
			return (NULL);
		}

		mib[5] = size / esize;
		kd->procbase2 = _kvm_malloc(kd, size);
		if (kd->procbase2 == 0)
			return (NULL);
		st = sysctl(mib, 6, kd->procbase2, &size, NULL, 0);
		if (st == -1) {
			_kvm_syserr(kd, kd->program, "kvm_getproc2");
			return (NULL);
		}
		nprocs = size / esize;
	} else {
		struct kinfo_proc2 kp2, *kp2p;
		struct kinfo_proc *kp;
		char *kp2c;
		int i;

		kp = kvm_getprocs(kd, op, arg, &nprocs);
		if (kp == NULL)
			return (NULL);

		kd->procbase2 = _kvm_malloc(kd, nprocs * esize);
		if (kd->procbase2 == NULL)
			return (NULL);
		kp2c = (char *)kd->procbase2;
		kp2p = &kp2;
		for (i = 0; i < nprocs; i++, kp++) {
			memset(kp2p, 0, sizeof(kp2));
			kp2p->p_paddr = PTRTOINT64(kp->kp_eproc.e_paddr);

			kp2p->p_addr = PTRTOINT64(kp->kp_proc.p_addr);
			kp2p->p_fd = PTRTOINT64(kp->kp_proc.p_fd);
			kp2p->p_stats = PTRTOINT64(kp->kp_proc.p_stats);
			kp2p->p_limit = PTRTOINT64(kp->kp_eproc.e_limit);
			kp2p->p_vmspace = PTRTOINT64(kp->kp_proc.p_vmspace);
			kp2p->p_sigacts = PTRTOINT64(kp->kp_proc.p_sigacts);
			kp2p->p_sess = PTRTOINT64(kp->kp_eproc.e_sess);
			kp2p->p_tsess = 0;
			kp2p->p_ru = PTRTOINT64(kp->kp_proc.p_ru);

			kp2p->p_eflag = 0;
			kp2p->p_exitsig = kp->kp_proc.p_exitsig;
			kp2p->p_flag = kp->kp_proc.p_flag;

			kp2p->p_pid = kp->kp_proc.p_pid;

			kp2p->p_ppid = kp->kp_eproc.e_ppid;
#if 0
			kp2p->p_sid = kp->kp_eproc.e_sid;
#else
			kp2p->p_sid = -1; /* XXX */
#endif
			kp2p->p__pgid = kp->kp_eproc.e_pgid;

			kp2p->p_tpgid = -1;

			kp2p->p_uid = kp->kp_eproc.e_ucred.cr_uid;
			kp2p->p_ruid = kp->kp_eproc.e_pcred.p_ruid;
			kp2p->p_gid = kp->kp_eproc.e_ucred.cr_gid;
			kp2p->p_rgid = kp->kp_eproc.e_pcred.p_rgid;

			memcpy(kp2p->p_groups, kp->kp_eproc.e_ucred.cr_groups,
			    MIN(sizeof(kp2p->p_groups),
			    sizeof(kp->kp_eproc.e_ucred.cr_groups)));
			kp2p->p_ngroups = kp->kp_eproc.e_ucred.cr_ngroups;

			kp2p->p_jobc = kp->kp_eproc.e_jobc;
			kp2p->p_tdev = kp->kp_eproc.e_tdev;
			kp2p->p_tpgid = kp->kp_eproc.e_tpgid;
			kp2p->p_tsess = PTRTOINT64(kp->kp_eproc.e_tsess);

			kp2p->p_estcpu = kp->kp_proc.p_estcpu;
			kp2p->p_rtime_sec = kp->kp_proc.p_estcpu;
			kp2p->p_rtime_usec = kp->kp_proc.p_estcpu;
			kp2p->p_cpticks = kp->kp_proc.p_cpticks;
			kp2p->p_pctcpu = kp->kp_proc.p_pctcpu;
			kp2p->p_swtime = kp->kp_proc.p_swtime;
			kp2p->p_slptime = kp->kp_proc.p_slptime;
			kp2p->p_schedflags = 0;

			kp2p->p_uticks = kp->kp_proc.p_uticks;
			kp2p->p_sticks = kp->kp_proc.p_sticks;
			kp2p->p_iticks = kp->kp_proc.p_iticks;

			kp2p->p_tracep = PTRTOINT64(kp->kp_proc.p_tracep);
			kp2p->p_traceflag = kp->kp_proc.p_traceflag;

			kp2p->p_holdcnt = 1;

			kp2p->p_siglist = kp->kp_proc.p_siglist;
			kp2p->p_sigmask = kp->kp_proc.p_sigmask;
			kp2p->p_sigignore = kp->kp_proc.p_sigignore;
			kp2p->p_sigcatch = kp->kp_proc.p_sigcatch;

			kp2p->p_stat = kp->kp_proc.p_stat;
			kp2p->p_priority = kp->kp_proc.p_priority;
			kp2p->p_usrpri = kp->kp_proc.p_usrpri;
			kp2p->p_nice = kp->kp_proc.p_nice;

			kp2p->p_xstat = kp->kp_proc.p_xstat;
			kp2p->p_acflag = kp->kp_proc.p_acflag;

			strncpy(kp2p->p_comm, kp->kp_proc.p_comm,
			    MIN(sizeof(kp2p->p_comm), sizeof(kp->kp_proc.p_comm)));

			strncpy(kp2p->p_wmesg, kp->kp_eproc.e_wmesg,
			    sizeof(kp2p->p_wmesg));
			kp2p->p_wchan = PTRTOINT64(kp->kp_proc.p_wchan);

			strncpy(kp2p->p_login, kp->kp_eproc.e_login,
			    sizeof(kp2p->p_login));

			kp2p->p_vm_rssize = kp->kp_eproc.e_xrssize;
			kp2p->p_vm_tsize = kp->kp_eproc.e_vm.vm_tsize;
			kp2p->p_vm_dsize = kp->kp_eproc.e_vm.vm_dsize;
			kp2p->p_vm_ssize = kp->kp_eproc.e_vm.vm_ssize;

			kp2p->p_eflag = kp->kp_eproc.e_flag;

			if (P_ZOMBIE(&kp->kp_proc) || kp->kp_proc.p_addr == NULL ||
			    KREAD(kd, (u_long)kp->kp_proc.p_addr, &user)) {
				kp2p->p_uvalid = 0;
			} else {
				kp2p->p_uvalid = 1;

				kp2p->p_ustart_sec = user.u_stats.p_start.tv_sec;
				kp2p->p_ustart_usec = user.u_stats.p_start.tv_usec;

				kp2p->p_uutime_sec = user.u_stats.p_ru.ru_utime.tv_sec;
				kp2p->p_uutime_usec = user.u_stats.p_ru.ru_utime.tv_usec;
				kp2p->p_ustime_sec = user.u_stats.p_ru.ru_stime.tv_sec;
				kp2p->p_ustime_usec = user.u_stats.p_ru.ru_stime.tv_usec;

				kp2p->p_uru_maxrss = user.u_stats.p_ru.ru_maxrss;
				kp2p->p_uru_ixrss = user.u_stats.p_ru.ru_ixrss;
				kp2p->p_uru_idrss = user.u_stats.p_ru.ru_idrss;
				kp2p->p_uru_isrss = user.u_stats.p_ru.ru_isrss;
				kp2p->p_uru_minflt = user.u_stats.p_ru.ru_minflt;
				kp2p->p_uru_majflt = user.u_stats.p_ru.ru_majflt;
				kp2p->p_uru_nswap = user.u_stats.p_ru.ru_nswap;
				kp2p->p_uru_inblock = user.u_stats.p_ru.ru_inblock;
				kp2p->p_uru_oublock = user.u_stats.p_ru.ru_oublock;
				kp2p->p_uru_msgsnd = user.u_stats.p_ru.ru_msgsnd;
				kp2p->p_uru_msgrcv = user.u_stats.p_ru.ru_msgrcv;
				kp2p->p_uru_nsignals = user.u_stats.p_ru.ru_nsignals;
				kp2p->p_uru_nvcsw = user.u_stats.p_ru.ru_nvcsw;
				kp2p->p_uru_nivcsw = user.u_stats.p_ru.ru_nivcsw;

				kp2p->p_uctime_sec =
				    user.u_stats.p_cru.ru_utime.tv_sec +
				    user.u_stats.p_cru.ru_stime.tv_sec;
				kp2p->p_uctime_usec =
				    user.u_stats.p_cru.ru_utime.tv_usec +
				    user.u_stats.p_cru.ru_stime.tv_usec;
			}

			memcpy(kp2c, &kp2, esize);
			kp2c += esize;
		}

		free(kd->procbase);
	}
	*cnt = nprocs;
	return (kd->procbase2);
}
Example #22
static int
_arm_initvtop(kvm_t *kd)
{
	struct vmstate *vm;
	struct kvm_nlist nl[2];
	kvaddr_t kernbase;
	arm_physaddr_t physaddr, pa;
	arm_pd_entry_t *l1pt;
	size_t i;
	int found;

	if (kd->rawdump) {
		_kvm_err(kd, kd->program, "raw dumps not supported on arm");
		return (-1);
	}

	vm = _kvm_malloc(kd, sizeof(*vm));
	if (vm == NULL) {
		_kvm_err(kd, kd->program, "cannot allocate vm");
		return (-1);
	}
	kd->vmst = vm;
	vm->l1pt = NULL;

	if (_kvm_read_core_phdrs(kd, &vm->phnum, &vm->phdr) == -1)
		return (-1);

	found = 0;
	for (i = 0; i < vm->phnum; i++) {
		if (vm->phdr[i].p_type == PT_DUMP_DELTA) {
			kernbase = vm->phdr[i].p_vaddr;
			physaddr = vm->phdr[i].p_paddr;
			found = 1;
			break;
		}
	}

	nl[1].n_name = NULL;
	if (!found) {
		nl[0].n_name = "kernbase";
		if (kvm_nlist2(kd, nl) != 0) {
#ifdef __arm__
			kernbase = KERNBASE;
#else
		_kvm_err(kd, kd->program, "cannot resolve kernbase");
		return (-1);
#endif
		} else
			kernbase = nl[0].n_value;

		nl[0].n_name = "physaddr";
		if (kvm_nlist2(kd, nl) != 0) {
			_kvm_err(kd, kd->program, "couldn't get phys addr");
			return (-1);
		}
		physaddr = nl[0].n_value;
	}
	nl[0].n_name = "kernel_l1pa";
	if (kvm_nlist2(kd, nl) != 0) {
		_kvm_err(kd, kd->program, "bad namelist");
		return (-1);
	}
	if (kvm_read2(kd, (nl[0].n_value - kernbase + physaddr), &pa,
	    sizeof(pa)) != sizeof(pa)) {
		_kvm_err(kd, kd->program, "cannot read kernel_l1pa");
		return (-1);
	}
	l1pt = _kvm_malloc(kd, ARM_L1_TABLE_SIZE);
	if (l1pt == NULL) {
		_kvm_err(kd, kd->program, "cannot allocate l1pt");
		return (-1);
	}
	if (kvm_read2(kd, pa, l1pt, ARM_L1_TABLE_SIZE) != ARM_L1_TABLE_SIZE) {
		_kvm_err(kd, kd->program, "cannot read l1pt");
		free(l1pt);
		return (-1);
	}
	vm->l1pt = l1pt;
	return 0;
}
Example #23
struct kinfo_proc *
kvm_getprocs(kvm_t *kd, int op, int arg, size_t esize, int *cnt)
{
	int mib[6], st, nthreads;
	size_t size;

	if ((ssize_t)esize < 0)
		return (NULL);

	if (kd->procbase != NULL) {
		free(kd->procbase);
		/*
		 * Clear this pointer in case this call fails.  Otherwise,
		 * kvm_close() will free it again.
		 */
		kd->procbase = 0;
	}

	if (ISALIVE(kd)) {
		size = 0;
		mib[0] = CTL_KERN;
		mib[1] = KERN_PROC;
		mib[2] = op;
		mib[3] = arg;
		mib[4] = esize;
		mib[5] = 0;
		st = sysctl(mib, 6, NULL, &size, NULL, 0);
		if (st == -1) {
			_kvm_syserr(kd, kd->program, "kvm_getprocs");
			return (NULL);
		}

		mib[5] = size / esize;
		kd->procbase = _kvm_malloc(kd, size);
		if (kd->procbase == 0)
			return (NULL);
		st = sysctl(mib, 6, kd->procbase, &size, NULL, 0);
		if (st == -1) {
			_kvm_syserr(kd, kd->program, "kvm_getprocs");
			return (NULL);
		}
		nthreads = size / esize;
	} else {
		struct nlist nl[4];
		int i, maxthread;
		struct proc *p;
		char *bp;

		if (esize > sizeof(struct kinfo_proc)) {
			_kvm_syserr(kd, kd->program,
			    "kvm_getprocs: unknown fields requested: libkvm out of date?");
			return (NULL);
		}

		memset(nl, 0, sizeof(nl));
		nl[0].n_name = "_nthreads";
		nl[1].n_name = "_allproc";
		nl[2].n_name = "_zombproc";
		nl[3].n_name = NULL;

		if (kvm_nlist(kd, nl) != 0) {
			for (i = 0; nl[i].n_type != 0; ++i)
				;
			_kvm_err(kd, kd->program,
			    "%s: no such symbol", nl[i].n_name);
			return (NULL);
		}
		if (KREAD(kd, nl[0].n_value, &maxthread)) {
			_kvm_err(kd, kd->program, "can't read nthreads");
			return (NULL);
		}

		kd->procbase = _kvm_malloc(kd, maxthread * esize);
		if (kd->procbase == 0)
			return (NULL);
		bp = (char *)kd->procbase;

		/* allproc */
		if (KREAD(kd, nl[1].n_value, &p)) {
			_kvm_err(kd, kd->program, "cannot read allproc");
			return (NULL);
		}
		nthreads = kvm_proclist(kd, op, arg, p, bp, maxthread, esize);
		if (nthreads < 0)
			return (NULL);

		/* zombproc */
		if (KREAD(kd, nl[2].n_value, &p)) {
			_kvm_err(kd, kd->program, "cannot read zombproc");
			return (NULL);
		}
		i = kvm_proclist(kd, op, arg, p, bp + (esize * nthreads),
		    maxthread - nthreads, esize);
		if (i > 0)
			nthreads += i;
	}
	if (kd->procbase != NULL)
		*cnt = nthreads;
	return (kd->procbase);
}
Example #24
static int
_mips_minidump_initvtop(kvm_t *kd)
{
	struct vmstate *vmst;
	off_t off, sparse_off;

	vmst = _kvm_malloc(kd, sizeof(*vmst));
	if (vmst == NULL) {
		_kvm_err(kd, kd->program, "cannot allocate vm");
		return (-1);
	}

	kd->vmst = vmst;

	if (kd->nlehdr.e_ident[EI_CLASS] == ELFCLASS64 ||
	    kd->nlehdr.e_flags & EF_MIPS_ABI2)
		vmst->pte_size = 64;
	else
		vmst->pte_size = 32;

	if (pread(kd->pmfd, &vmst->hdr,
	    sizeof(vmst->hdr), 0) != sizeof(vmst->hdr)) {
		_kvm_err(kd, kd->program, "cannot read dump header");
		return (-1);
	}

	if (strncmp(MINIDUMP_MAGIC, vmst->hdr.magic,
	    sizeof(vmst->hdr.magic)) != 0) {
		_kvm_err(kd, kd->program, "not a minidump for this platform");
		return (-1);
	}
	vmst->hdr.version = _kvm32toh(kd, vmst->hdr.version);
	if (vmst->hdr.version != MINIDUMP_VERSION) {
		_kvm_err(kd, kd->program, "wrong minidump version. "
		    "Expected %d got %d", MINIDUMP_VERSION, vmst->hdr.version);
		return (-1);
	}
	vmst->hdr.msgbufsize = _kvm32toh(kd, vmst->hdr.msgbufsize);
	vmst->hdr.bitmapsize = _kvm32toh(kd, vmst->hdr.bitmapsize);
	vmst->hdr.ptesize = _kvm32toh(kd, vmst->hdr.ptesize);
	vmst->hdr.kernbase = _kvm64toh(kd, vmst->hdr.kernbase);
	vmst->hdr.dmapbase = _kvm64toh(kd, vmst->hdr.dmapbase);
	vmst->hdr.dmapend = _kvm64toh(kd, vmst->hdr.dmapend);

	/* Skip header and msgbuf */
	off = MIPS_PAGE_SIZE + mips_round_page(vmst->hdr.msgbufsize);

	sparse_off = off + mips_round_page(vmst->hdr.bitmapsize) +
	    mips_round_page(vmst->hdr.ptesize);
	if (_kvm_pt_init(kd, vmst->hdr.bitmapsize, off, sparse_off,
	    MIPS_PAGE_SIZE, sizeof(uint32_t)) == -1) {
		return (-1);
	}
	off += mips_round_page(vmst->hdr.bitmapsize);

	if (_kvm_pmap_init(kd, vmst->hdr.ptesize, off) == -1) {
		return (-1);
	}
	off += mips_round_page(vmst->hdr.ptesize);

	return (0);
}
Example #25
static int
_aarch64_minidump_initvtop(kvm_t *kd)
{
	struct vmstate *vmst;
	off_t off, sparse_off;

	vmst = _kvm_malloc(kd, sizeof(*vmst));
	if (vmst == NULL) {
		_kvm_err(kd, kd->program, "cannot allocate vm");
		return (-1);
	}
	kd->vmst = vmst;
	if (pread(kd->pmfd, &vmst->hdr, sizeof(vmst->hdr), 0) !=
	    sizeof(vmst->hdr)) {
		_kvm_err(kd, kd->program, "cannot read dump header");
		return (-1);
	}
	if (strncmp(MINIDUMP_MAGIC, vmst->hdr.magic,
	    sizeof(vmst->hdr.magic)) != 0) {
		_kvm_err(kd, kd->program, "not a minidump for this platform");
		return (-1);
	}

	vmst->hdr.version = le32toh(vmst->hdr.version);
	if (vmst->hdr.version != MINIDUMP_VERSION) {
		_kvm_err(kd, kd->program, "wrong minidump version. "
		    "Expected %d got %d", MINIDUMP_VERSION, vmst->hdr.version);
		return (-1);
	}
	vmst->hdr.msgbufsize = le32toh(vmst->hdr.msgbufsize);
	vmst->hdr.bitmapsize = le32toh(vmst->hdr.bitmapsize);
	vmst->hdr.pmapsize = le32toh(vmst->hdr.pmapsize);
	vmst->hdr.kernbase = le64toh(vmst->hdr.kernbase);
	vmst->hdr.dmapphys = le64toh(vmst->hdr.dmapphys);
	vmst->hdr.dmapbase = le64toh(vmst->hdr.dmapbase);
	vmst->hdr.dmapend = le64toh(vmst->hdr.dmapend);

	/* Skip header and msgbuf */
	off = AARCH64_PAGE_SIZE + aarch64_round_page(vmst->hdr.msgbufsize);

	/* build physical address lookup table for sparse pages */
	sparse_off = off + aarch64_round_page(vmst->hdr.bitmapsize) +
	    aarch64_round_page(vmst->hdr.pmapsize);
	if (_kvm_pt_init(kd, vmst->hdr.bitmapsize, off, sparse_off,
	    AARCH64_PAGE_SIZE, sizeof(uint64_t)) == -1) {
		_kvm_err(kd, kd->program, "cannot load core bitmap");
		return (-1);
	}
	off += aarch64_round_page(vmst->hdr.bitmapsize);

	vmst->page_map = _kvm_malloc(kd, vmst->hdr.pmapsize);
	if (vmst->page_map == NULL) {
		_kvm_err(kd, kd->program,
		    "cannot allocate %d bytes for page_map",
		    vmst->hdr.pmapsize);
		return (-1);
	}
	/* This is the end of the dump, savecore may have truncated it. */
	/*
	 * XXX: This doesn't make sense.  The pmap is not at the end,
	 * and if it is truncated we don't have any actual data (it's
	 * all stored after the bitmap and pmap).  -- jhb
	 */
	if (pread(kd->pmfd, vmst->page_map, vmst->hdr.pmapsize, off) <
	    AARCH64_PAGE_SIZE) {
		_kvm_err(kd, kd->program, "cannot read %d bytes for page_map",
		    vmst->hdr.pmapsize);
		return (-1);
	}
	off += aarch64_round_page(vmst->hdr.pmapsize);

	return (0);
}