int
_kvm_stat_ntfs(kvm_t *kd, struct kinfo_file *kf, struct vnode *vp)
{
	struct ntnode ntnode;
	struct fnode fn;
	struct ntfsmount ntm;

	/*
	 * To get the ntnode, we have to go in two steps: first read the
	 * appropriate struct fnode, then take the address of the ntnode
	 * from it and read its contents.
	 */
	if (KREAD(kd, (u_long)VTOF(vp), &fn)) {
		_kvm_err(kd, kd->program, "can't read fnode at %p", VTOF(vp));
		return (-1);
	}
	if (KREAD(kd, (u_long)FTONT(&fn), &ntnode)) {
		_kvm_err(kd, kd->program, "can't read ntnode at %p",
		    FTONT(&fn));
		return (-1);
	}
	if (KREAD(kd, (u_long)ntnode.i_mp, &ntm)) {
		_kvm_err(kd, kd->program, "can't read ntfsmount at %p",
		    ntnode.i_mp);
		return (-1);
	}
	kf->va_fsid = ntnode.i_dev & 0xffff;
	kf->va_fileid = (long)ntnode.i_number;
	kf->va_mode = (mode_t)ntm.ntm_mode | _kvm_getftype(vp->v_type);
	kf->va_size = fn.f_size;
	kf->va_rdev = 0;	/* XXX */
	return (0);
}
static int
_kvm_minidump_vatop_v1(kvm_t *kd, u_long va, off_t *pa)
{
	struct vmstate *vm;
	u_long offset;
	pt_entry_t pte;
	u_long pteindex;
	u_long a;
	off_t ofs;

	vm = kd->vmst;
	offset = va & (PAGE_SIZE - 1);

	if (va >= vm->hdr.kernbase) {
		pteindex = (va - vm->hdr.kernbase) >> PAGE_SHIFT;
		if (pteindex >= vm->hdr.pmapsize / sizeof(*vm->page_map))
			goto invalid;
		pte = vm->page_map[pteindex];
		if (((u_long)pte & PG_V) == 0) {
			_kvm_err(kd, kd->program, "_kvm_vatop: pte not valid");
			goto invalid;
		}
		a = pte & PG_FRAME;
		ofs = hpt_find(kd, a);
		if (ofs == -1) {
			_kvm_err(kd, kd->program,
			    "_kvm_vatop: physical address 0x%lx not in minidump",
			    a);
			goto invalid;
		}
		*pa = ofs + offset;
		return (PAGE_SIZE - offset);
	} else if (va >= vm->hdr.dmapbase && va < vm->hdr.dmapend) {
/*
 * Build proc info array by reading in proc list from a crash dump.
 * We reallocate kd->procbase as necessary.
 */
static int
kvm_deadprocs(kvm_t *kd, int what, int arg, u_long a_allproc,
    u_long a_zombproc)
{
	struct kinfo_proc *bp = kd->procbase;
	int acnt, zcnt;
	struct proc *p;

	if (KREAD(kd, a_allproc, &p)) {
		_kvm_err(kd, kd->program, "cannot read allproc");
		return (-1);
	}
	acnt = kvm_proclist(kd, what, arg, p, bp);
	if (acnt < 0)
		return (acnt);

	if (KREAD(kd, a_zombproc, &p)) {
		_kvm_err(kd, kd->program, "cannot read zombproc");
		return (-1);
	}
	zcnt = kvm_proclist(kd, what, arg, p, bp + acnt);
	if (zcnt < 0)
		zcnt = 0;

	return (acnt + zcnt);
}
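/*
 * A minimal usage sketch (separate program) of the public kvm_getprocs(3)
 * entry point that ends up in the dead-kernel path above.  The kernel and
 * vmcore paths are hypothetical, and the kinfo_proc field names assume a
 * FreeBSD-style layout.
 */
#include <sys/param.h>
#include <sys/sysctl.h>
#include <sys/user.h>
#include <fcntl.h>
#include <kvm.h>
#include <limits.h>
#include <stdio.h>

int
main(void)
{
	char errbuf[_POSIX2_LINE_MAX];
	struct kinfo_proc *kp;
	kvm_t *kd;
	int i, cnt;

	/* Hypothetical kernel image and crash dump. */
	kd = kvm_openfiles("/boot/kernel/kernel", "/var/crash/vmcore.0",
	    NULL, O_RDONLY, errbuf);
	if (kd == NULL) {
		fprintf(stderr, "kvm_openfiles: %s\n", errbuf);
		return (1);
	}
	kp = kvm_getprocs(kd, KERN_PROC_ALL, 0, &cnt);
	if (kp != NULL)
		for (i = 0; i < cnt; i++)
			printf("%d\t%s\n", kp[i].ki_pid, kp[i].ki_comm);
	kvm_close(kd);
	return (0);
}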
static int
nlist_init(kvm_t *kd)
{
	if (kvm_swap_nl_cached)
		return (1);

	if (kvm_nlist(kd, kvm_swap_nl) < 0)
		return (0);

	/* Required entries */
	if (kvm_swap_nl[NL_SWTAILQ].n_value == 0) {
		_kvm_err(kd, kd->program, "unable to find swtailq");
		return (0);
	}
	if (kvm_swap_nl[NL_DMMAX].n_value == 0) {
		_kvm_err(kd, kd->program, "unable to find dmmax");
		return (0);
	}

	/* Get globals, type of swap */
	KGET(NL_DMMAX, &dmmax);

	kvm_swap_nl_cached = 1;
	return (1);
}
static int
_amd64_minidump_vatop_v1(kvm_t *kd, kvaddr_t va, off_t *pa)
{
	struct vmstate *vm;
	amd64_physaddr_t offset;
	amd64_pte_t pte;
	kvaddr_t pteindex;
	amd64_physaddr_t a;
	off_t ofs;

	vm = kd->vmst;
	offset = va & AMD64_PAGE_MASK;

	if (va >= vm->hdr.kernbase) {
		pteindex = (va - vm->hdr.kernbase) >> AMD64_PAGE_SHIFT;
		if (pteindex >= vm->hdr.pmapsize / sizeof(*vm->page_map))
			goto invalid;
		pte = le64toh(vm->page_map[pteindex]);
		if ((pte & AMD64_PG_V) == 0) {
			_kvm_err(kd, kd->program,
			    "_amd64_minidump_vatop_v1: pte not valid");
			goto invalid;
		}
		a = pte & AMD64_PG_FRAME;
		ofs = _kvm_pt_find(kd, a);
		if (ofs == -1) {
			_kvm_err(kd, kd->program,
			    "_amd64_minidump_vatop_v1: physical address 0x%jx not in minidump",
			    (uintmax_t)a);
			goto invalid;
		}
		*pa = ofs + offset;
		return (AMD64_PAGE_SIZE - offset);
	} else if (va >= vm->hdr.dmapbase && va < vm->hdr.dmapend) {
static int
_kvm_minidump_vatop(kvm_t *kd, u_long va, off_t *pa)
{
	struct vmstate *vm;
	u_long offset;
	pt_entry_t pte;
	u_long pteindex;
	u_long a;
	off_t ofs;
	uint32_t *ptemap;

	vm = kd->vmst;
	ptemap = vm->ptemap;
	offset = va & (PAGE_SIZE - 1);

	if (va >= vm->hdr.kernbase) {
		pteindex = (va - vm->hdr.kernbase) >> PAGE_SHIFT;
		pte = ptemap[pteindex];
		if ((pte & PG_V) == 0) {
			_kvm_err(kd, kd->program, "_kvm_vatop: pte not valid");
			goto invalid;
		}
		a = pte & PG_FRAME;
		ofs = hpt_find(kd, a);
		if (ofs == -1) {
			_kvm_err(kd, kd->program,
			    "_kvm_vatop: physical address 0x%lx not in minidump",
			    a);
			goto invalid;
		}
		*pa = ofs + offset;
		return (PAGE_SIZE - offset);
	} else {
static int
_sparc64_initvtop(kvm_t *kd)
{
	struct sparc64_dump_hdr hdr;
	struct sparc64_dump_reg *regs;
	struct vmstate *vm;
	size_t regsz;
	uint64_t pa;
	int i;

	vm = (struct vmstate *)_kvm_malloc(kd, sizeof(*vm));
	if (vm == NULL) {
		_kvm_err(kd, kd->program, "cannot allocate vm");
		return (-1);
	}
	kd->vmst = vm;

	if (!_sparc64_read_phys(kd, 0, &hdr, sizeof(hdr)))
		goto fail_vm;
	hdr.dh_hdr_size = be64toh(hdr.dh_hdr_size);
	hdr.dh_tsb_pa = be64toh(hdr.dh_tsb_pa);
	hdr.dh_tsb_size = be64toh(hdr.dh_tsb_size);
	hdr.dh_tsb_mask = be64toh(hdr.dh_tsb_mask);
	hdr.dh_nregions = be32toh(hdr.dh_nregions);
	pa = hdr.dh_tsb_pa;

	regsz = hdr.dh_nregions * sizeof(*regs);
	regs = _kvm_malloc(kd, regsz);
	if (regs == NULL) {
		_kvm_err(kd, kd->program, "cannot allocate regions");
		goto fail_vm;
	}
	if (!_sparc64_read_phys(kd, sizeof(hdr), regs, regsz))
		goto fail_regs;
	for (i = 0; i < hdr.dh_nregions; i++) {
		regs[i].dr_pa = be64toh(regs[i].dr_pa);
		regs[i].dr_size = be64toh(regs[i].dr_size);
		regs[i].dr_offs = be64toh(regs[i].dr_offs);
	}
	qsort(regs, hdr.dh_nregions, sizeof(*regs), _sparc64_reg_cmp);

	vm->vm_tsb_mask = hdr.dh_tsb_mask;
	vm->vm_regions = regs;
	vm->vm_nregions = hdr.dh_nregions;
	vm->vm_tsb_off = _sparc64_find_off(vm, hdr.dh_tsb_pa, hdr.dh_tsb_size);
	if (vm->vm_tsb_off == KVM_OFF_NOTFOUND) {
		_kvm_err(kd, kd->program, "tsb not found in dump");
		goto fail_regs;
	}
	return (0);

fail_regs:
	free(regs);
fail_vm:
	free(vm);
	return (-1);
}
int
kvm_getcptime(kvm_t *kd, long *cp_time)
{
	struct pcpu *pc;
	int i, j, maxcpu;

	if (kd == NULL) {
		kvm_cp_time_cached = 0;
		return (0);
	}

	if (ISALIVE(kd))
		return (getsysctl(kd, "kern.cp_time", cp_time,
		    sizeof(long) * CPUSTATES));

	if (!kd->arch->ka_native(kd)) {
		_kvm_err(kd, kd->program,
		    "cannot read cp_time from non-native core");
		return (-1);
	}

	if (kvm_cp_time_cached == 0) {
		if (_kvm_cp_time_init(kd) < 0)
			return (-1);
	}

	/* If this kernel has a "cp_time[]" symbol, then just read that. */
	if (kvm_cp_time_nl[NL_CP_TIME].n_value != 0) {
		if (kvm_read(kd, kvm_cp_time_nl[NL_CP_TIME].n_value, cp_time,
		    sizeof(long) * CPUSTATES) != sizeof(long) * CPUSTATES) {
			_kvm_err(kd, kd->program, "cannot read cp_time array");
			return (-1);
		}
		return (0);
	}

	/*
	 * If we don't have that symbol, then we have to simulate
	 * "cp_time[]" by adding up the individual times for each CPU.
	 */
	maxcpu = kvm_getmaxcpu(kd);
	if (maxcpu < 0)
		return (-1);
	for (i = 0; i < CPUSTATES; i++)
		cp_time[i] = 0;
	for (i = 0; i < maxcpu; i++) {
		pc = kvm_getpcpu(kd, i);
		if (pc == NULL)
			continue;
		if (pc == (void *)-1)
			return (-1);
		for (j = 0; j < CPUSTATES; j++)
			cp_time[j] += pc->pc_cp_time[j];
		free(pc);
	}
	return (0);
}
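/*
 * A minimal usage sketch (separate program) calling kvm_getcptime(3) on a
 * crash dump; the kernel and vmcore paths are hypothetical, and passing a
 * live-system handle would instead take the sysctl path above.
 */
#include <sys/param.h>
#include <sys/resource.h>
#include <fcntl.h>
#include <kvm.h>
#include <limits.h>
#include <stdio.h>

int
main(void)
{
	char errbuf[_POSIX2_LINE_MAX];
	long cp_time[CPUSTATES];
	kvm_t *kd;
	int i;

	kd = kvm_openfiles("/boot/kernel/kernel", "/var/crash/vmcore.0",
	    NULL, O_RDONLY, errbuf);
	if (kd == NULL) {
		fprintf(stderr, "kvm_openfiles: %s\n", errbuf);
		return (1);
	}
	if (kvm_getcptime(kd, cp_time) == 0)
		for (i = 0; i < CPUSTATES; i++)
			printf("cp_time[%d] = %ld\n", i, cp_time[i]);
	else
		fprintf(stderr, "%s\n", kvm_geterr(kd));
	kvm_close(kd);
	return (0);
}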
int
_kvm_initvtop(kvm_t *kd)
{
	struct vmstate *vm;
	struct nlist nl[4];
	struct uvmexp uvmexp;

	vm = (struct vmstate *)_kvm_malloc(kd, sizeof(*vm));
	if (vm == 0)
		return (-1);
	kd->vmst = vm;

	nl[0].n_name = "Sysmap";
	nl[1].n_name = "Sysmapsize";
	nl[2].n_name = "uvmexp";
	nl[3].n_name = 0;

	if (kvm_nlist(kd, nl) != 0) {
		_kvm_err(kd, kd->program, "bad namelist");
		return (-1);
	}
	if (KREAD(kd, (u_long)nl[0].n_value, &vm->Sysmap)) {
		_kvm_err(kd, kd->program, "cannot read Sysmap");
		return (-1);
	}
	if (KREAD(kd, (u_long)nl[1].n_value, &vm->Sysmapsize)) {
		_kvm_err(kd, kd->program, "cannot read Sysmapsize");
		return (-1);
	}

	/*
	 * We are only interested in the first three fields of struct
	 * uvmexp, so do not try to read more than necessary (especially
	 * in case the layout changes).
	 */
	if (kvm_read(kd, (u_long)nl[2].n_value, &uvmexp,
	    3 * sizeof(int)) != 3 * sizeof(int)) {
		_kvm_err(kd, kd->program, "cannot read uvmexp");
		return (-1);
	}
	vm->pagesize = uvmexp.pagesize;
	vm->pagemask = uvmexp.pagemask;
	vm->pageshift = uvmexp.pageshift;

	/*
	 * Older kernels might not have this symbol; in that case, use
	 * the value of VM_MIN_KERNEL_ADDRESS they must have used.
	 */
	nl[0].n_name = "Sysmapbase";
	nl[1].n_name = 0;
	if (kvm_nlist(kd, nl) != 0 ||
	    KREAD(kd, (u_long)nl[0].n_value, &vm->Sysmapbase))
		vm->Sysmapbase = (vaddr_t)CKSSEG_BASE;

	return (0);
}
static int
_arm_minidump_kvatop(kvm_t *kd, kvaddr_t va, off_t *pa)
{
	struct vmstate *vm;
	arm_pt_entry_t pte;
	arm_physaddr_t offset, a;
	kvaddr_t pteindex;
	off_t ofs;
	arm_pt_entry_t *ptemap;

	if (ISALIVE(kd)) {
		_kvm_err(kd, 0, "_arm_minidump_kvatop called in live kernel!");
		return (0);
	}

	vm = kd->vmst;
	ptemap = vm->ptemap;

	if (va >= vm->hdr.kernbase) {
		pteindex = (va - vm->hdr.kernbase) >> ARM_PAGE_SHIFT;
		if (pteindex >= vm->hdr.ptesize / sizeof(*ptemap))
			goto invalid;
		pte = _kvm32toh(kd, ptemap[pteindex]);
		if ((pte & ARM_L2_TYPE_MASK) == ARM_L2_TYPE_INV) {
			_kvm_err(kd, kd->program,
			    "_arm_minidump_kvatop: pte not valid");
			goto invalid;
		}
		if ((pte & ARM_L2_TYPE_MASK) == ARM_L2_TYPE_L) {
			/* 64K page -> convert to be like 4K page */
			offset = va & ARM_L2_S_OFFSET;
			a = (pte & ARM_L2_L_FRAME) +
			    (va & ARM_L2_L_OFFSET & ARM_L2_S_FRAME);
		} else {
			if (kd->vmst->hdr.mmuformat == MINIDUMP_MMU_FORMAT_V4 &&
			    (pte & ARM_L2_TYPE_MASK) == ARM_L2_TYPE_T) {
				_kvm_err(kd, kd->program,
				    "_arm_minidump_kvatop: pte not supported");
				goto invalid;
			}
			/* 4K page */
			offset = va & ARM_L2_S_OFFSET;
			a = pte & ARM_L2_S_FRAME;
		}

		ofs = _kvm_pt_find(kd, a);
		if (ofs == -1) {
			_kvm_err(kd, kd->program, "_arm_minidump_kvatop: "
			    "physical address 0x%jx not in minidump",
			    (uintmax_t)a);
			goto invalid;
		}

		*pa = ofs + offset;
		return (ARM_PAGE_SIZE - offset);
	} else
/*
 * Translate a kernel virtual address to a physical address.
 */
int
_kvm_kvatop(kvm_t *kd, u_long va, paddr_t *pa)
{
	struct vmstate *vm;
	pt_entry_t pte;
	u_long idx, addr;
	int offset;

	if (ISALIVE(kd)) {
		_kvm_err(kd, 0, "vatop called in live kernel!");
		return ((off_t)0);
	}
	vm = kd->vmst;
	offset = (int)va & vm->pagemask;

	/*
	 * If we are initializing (kernel segment table pointer not yet set)
	 * then return pa == va to avoid infinite recursion.
	 */
	if (vm->Sysmap == 0) {
		*pa = va;
		return vm->pagesize - offset;
	}

	/*
	 * Check for direct-mapped segments.
	 */
	if (IS_XKPHYS(va)) {
		*pa = XKPHYS_TO_PHYS(va);
		return vm->pagesize - offset;
	}
	if (va >= (vaddr_t)CKSEG0_BASE && va < (vaddr_t)CKSSEG_BASE) {
		*pa = CKSEG0_TO_PHYS(va);
		return vm->pagesize - offset;
	}

	if (va < vm->Sysmapbase)
		goto invalid;
	idx = (va - vm->Sysmapbase) >> vm->pageshift;
	if (idx >= vm->Sysmapsize)
		goto invalid;
	addr = (u_long)vm->Sysmap + idx;

	/*
	 * Can't use KREAD to read kernel segment table entries.
	 * Fortunately it is 1-to-1 mapped so we don't have to.
	 */
	if (_kvm_pread(kd, kd->pmfd, (char *)&pte, sizeof(pte),
	    (off_t)addr) < 0)
		goto invalid;

	if (!(pte & PG_V))
		goto invalid;
	*pa = (pte & PG_FRAME) | (paddr_t)offset;
	return vm->pagesize - offset;

invalid:
	_kvm_err(kd, 0, "invalid address (%lx)", va);
	return (0);
}
/*
 * Map the ELF headers into the process' address space. We do this in two
 * steps: first the ELF header itself, and then, using the information in
 * it, the whole set of headers.
 */
static int
powerpc_maphdrs(kvm_t *kd)
{
	struct vmstate *vm;
	size_t mapsz;

	vm = kd->vmst;

	vm->mapsz = PAGE_SIZE;
	vm->map = mmap(NULL, vm->mapsz, PROT_READ, MAP_PRIVATE, kd->pmfd, 0);
	if (vm->map == MAP_FAILED) {
		_kvm_err(kd, kd->program, "cannot map corefile");
		return (-1);
	}
	vm->dmphdrsz = 0;
	vm->eh = vm->map;
	if (!valid_elf_header(vm->eh)) {
		/*
		 * Hmmm, no ELF header. Maybe we still have a dump header.
		 * This is normal when the core file wasn't created by
		 * savecore(8), but instead was dumped over TFTP. We can
		 * easily skip the dump header...
		 */
		vm->dmphdrsz = dump_header_size(vm->map);
		if (vm->dmphdrsz == 0)
			goto inval;
		vm->eh = (void *)((uintptr_t)vm->map + vm->dmphdrsz);
		if (!valid_elf_header(vm->eh))
			goto inval;
	}
	mapsz = be16toh(vm->eh->e_phentsize) * be16toh(vm->eh->e_phnum) +
	    be32toh(vm->eh->e_phoff);
	munmap(vm->map, vm->mapsz);

	/* Map all headers. */
	vm->mapsz = vm->dmphdrsz + mapsz;
	vm->map = mmap(NULL, vm->mapsz, PROT_READ, MAP_PRIVATE, kd->pmfd, 0);
	if (vm->map == MAP_FAILED) {
		_kvm_err(kd, kd->program, "cannot map corefile headers");
		return (-1);
	}
	vm->eh = (void *)((uintptr_t)vm->map + vm->dmphdrsz);
	vm->ph = (void *)((uintptr_t)vm->eh + be32toh(vm->eh->e_phoff));
	return (0);

inval:
	munmap(vm->map, vm->mapsz);
	vm->map = MAP_FAILED;
	_kvm_err(kd, kd->program, "invalid corefile");
	return (-1);
}
int
_kvm_minidump_initvtop(kvm_t *kd)
{
	struct vmstate *vmst;
	off_t off;

	vmst = _kvm_malloc(kd, sizeof(*vmst));
	if (vmst == 0) {
		_kvm_err(kd, kd->program, "cannot allocate vm");
		return (-1);
	}
	kd->vmst = vmst;
	vmst->minidump = 1;
	if (pread(kd->pmfd, &vmst->hdr, sizeof(vmst->hdr), 0) !=
	    sizeof(vmst->hdr)) {
		_kvm_err(kd, kd->program, "cannot read dump header");
		return (-1);
	}
	if (strncmp(MINIDUMP_MAGIC, vmst->hdr.magic,
	    sizeof(vmst->hdr.magic)) != 0) {
		_kvm_err(kd, kd->program, "not a minidump for this platform");
		return (-1);
	}
	if (vmst->hdr.version != MINIDUMP_VERSION) {
		_kvm_err(kd, kd->program,
		    "wrong minidump version. expected %d got %d",
		    MINIDUMP_VERSION, vmst->hdr.version);
		return (-1);
	}

	/* Skip header and msgbuf */
	off = PAGE_SIZE + round_page(vmst->hdr.msgbufsize);

	vmst->bitmap = _kvm_malloc(kd, vmst->hdr.bitmapsize);
	if (vmst->bitmap == NULL) {
		_kvm_err(kd, kd->program,
		    "cannot allocate %d bytes for bitmap",
		    vmst->hdr.bitmapsize);
		return (-1);
	}
	if (pread(kd->pmfd, vmst->bitmap, vmst->hdr.bitmapsize, off) !=
	    (ssize_t)vmst->hdr.bitmapsize) {
		_kvm_err(kd, kd->program,
		    "cannot read %d bytes for page bitmap",
		    vmst->hdr.bitmapsize);
		return (-1);
	}
	off += round_page(vmst->hdr.bitmapsize);

	vmst->ptemap = _kvm_malloc(kd, vmst->hdr.ptesize);
	if (vmst->ptemap == NULL) {
		_kvm_err(kd, kd->program,
		    "cannot allocate %d bytes for ptemap",
		    vmst->hdr.ptesize);
		return (-1);
	}
	if (pread(kd->pmfd, vmst->ptemap, vmst->hdr.ptesize, off) !=
	    (ssize_t)vmst->hdr.ptesize) {
		_kvm_err(kd, kd->program, "cannot read %d bytes for ptemap",
		    vmst->hdr.ptesize);
		return (-1);
	}
	off += vmst->hdr.ptesize;

	/* build physical address hash table for sparse pages */
	inithash(kd, vmst->bitmap, vmst->hdr.bitmapsize, off);

	return (0);
}
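/*
 * A standalone sketch (separate program, hypothetical header values and
 * macro names) of the same offset arithmetic, to make the minidump file
 * layout explicit: header page, then msgbuf, then page bitmap, then PTE
 * pages, then the dumped physical pages that inithash() indexes.
 */
#include <stdint.h>
#include <stdio.h>

#define	SKETCH_PAGE_SIZE	4096
#define	sketch_round_page(x) \
	(((x) + SKETCH_PAGE_SIZE - 1) & ~(uint64_t)(SKETCH_PAGE_SIZE - 1))

int
main(void)
{
	uint64_t msgbufsize = 65536;	/* hypothetical header fields */
	uint64_t bitmapsize = 131072;
	uint64_t ptesize = 8388608;
	uint64_t off;

	off = SKETCH_PAGE_SIZE;			/* skip the dump header */
	off += sketch_round_page(msgbufsize);	/* skip the message buffer */
	printf("bitmap at offset %ju\n", (uintmax_t)off);
	off += sketch_round_page(bitmapsize);
	printf("ptemap at offset %ju\n", (uintmax_t)off);
	off += ptesize;
	printf("first dumped page at offset %ju\n", (uintmax_t)off);
	return (0);
}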
static int
getsysctl(kvm_t *kd, const char *name, void *ptr, size_t len)
{
	size_t nlen = len;

	if (sysctlbyname(name, ptr, &nlen, NULL, 0) == -1) {
		_kvm_err(kd, kd->program, "cannot read sysctl %s:%s", name,
		    strerror(errno));
		return (0);
	}
	if (nlen != len) {
		_kvm_err(kd, kd->program, "sysctl %s has unexpected size",
		    name);
		return (0);
	}
	return (1);
}
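/*
 * The same size-checked sysctlbyname(3) pattern outside the library; a
 * minimal sketch (separate program) reading the kern.cp_time array on a
 * live FreeBSD-style system.
 */
#include <sys/types.h>
#include <sys/resource.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	long cp_time[CPUSTATES];
	size_t len = sizeof(cp_time);

	if (sysctlbyname("kern.cp_time", cp_time, &len, NULL, 0) == -1) {
		perror("sysctlbyname");
		return (1);
	}
	if (len != sizeof(cp_time)) {
		fprintf(stderr, "kern.cp_time has unexpected size\n");
		return (1);
	}
	printf("user ticks: %ld\n", cp_time[0]);
	return (0);
}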
/*
 * Read from user space. The user context is given by pid.
 */
ssize_t
kvm_uread(kvm_t *kd, pid_t pid, u_long uva, char *buf, size_t len)
{
	char *cp;
	char procfile[MAXPATHLEN];
	ssize_t amount;
	int fd;

	if (!kvm_ishost(kd)) {
		/* XXX: vkernels */
		_kvm_err(kd, kd->program,
		    "cannot read user space from dead kernel");
		return (0);
	}

	sprintf(procfile, "/proc/%d/mem", pid);
	fd = open(procfile, O_RDONLY, 0);
	if (fd < 0) {
		_kvm_err(kd, kd->program, "cannot open %s", procfile);
		return (0);
	}

	cp = buf;
	while (len > 0) {
		errno = 0;
		if (lseek(fd, (off_t)uva, 0) == -1 && errno != 0) {
			_kvm_err(kd, kd->program, "invalid address (%lx) in %s",
			    uva, procfile);
			break;
		}
		amount = read(fd, cp, len);
		if (amount < 0) {
			_kvm_syserr(kd, kd->program, "error reading %s",
			    procfile);
			break;
		}
		if (amount == 0) {
			_kvm_err(kd, kd->program, "EOF reading %s", procfile);
			break;
		}
		cp += amount;
		uva += amount;
		len -= amount;
	}

	close(fd);
	return ((ssize_t)(cp - buf));
}
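/*
 * A minimal usage sketch (separate program), assuming the kvm_uread()
 * prototype above is exported by <kvm.h> on this platform and that the
 * target pid and user virtual address (both hypothetical here) are valid;
 * it copies out a small chunk of a process's user-space memory on a live
 * system.
 */
#include <sys/types.h>
#include <fcntl.h>
#include <kvm.h>
#include <limits.h>
#include <stdio.h>

int
main(void)
{
	char errbuf[_POSIX2_LINE_MAX];
	char buf[256];
	kvm_t *kd;
	ssize_t n;
	pid_t pid = 1;			/* hypothetical target process */
	u_long uva = 0x400000;		/* hypothetical user virtual address */

	kd = kvm_openfiles(NULL, NULL, NULL, O_RDONLY, errbuf);
	if (kd == NULL) {
		fprintf(stderr, "kvm_openfiles: %s\n", errbuf);
		return (1);
	}
	n = kvm_uread(kd, pid, uva, buf, sizeof(buf));
	printf("read %zd bytes\n", n);
	kvm_close(kd);
	return (0);
}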
int
_kvm_kvatop(kvm_t *kd, vaddr_t va, paddr_t *pa)
{
	_kvm_err(kd, 0, "vatop not yet implemented!");
	return (-1);
}
int
_kvm_initvtop(kvm_t *kd)
{
	struct vmstate *vm;
	struct stat st;
	struct nlist nl[2];

	vm = (struct vmstate *)_kvm_malloc(kd, sizeof(*vm));
	if (vm == 0)
		return (-1);
	kd->vmst = vm;

	if (fstat(kd->pmfd, &st) < 0)
		return (-1);

	/* Get end of kernel address */
	nl[0].n_name = "_end";
	nl[1].n_name = 0;
	if (kvm_nlist(kd, nl) != 0) {
		_kvm_err(kd, kd->program, "pmap_stod: no such symbol");
		return (-1);
	}
	vm->end = (u_long)nl[0].n_value;

	return (0);
}
int
_kvm_kvatop(kvm_t *kd, u_long va, off_t *pa)
{
	struct tte tte;
	off_t tte_off;
	u_long vpn;
	off_t pa_off;
	u_long pg_off;
	int rest;

	pg_off = va & PAGE_MASK;
	if (va >= VM_MIN_DIRECT_ADDRESS)
		pa_off = TLB_DIRECT_TO_PHYS(va) & ~PAGE_MASK;
	else {
		vpn = btop(va);
		tte_off = kd->vmst->vm_tsb_off +
		    ((vpn & kd->vmst->vm_tsb_mask) << TTE_SHIFT);
		if (!_kvm_read_phys(kd, tte_off, &tte, sizeof(tte)))
			goto invalid;
		if (!tte_match(&tte, va))
			goto invalid;
		pa_off = TTE_GET_PA(&tte);
	}
	rest = PAGE_SIZE - pg_off;
	pa_off = _kvm_find_off(kd->vmst, pa_off, rest);
	if (pa_off == KVM_OFF_NOTFOUND)
		goto invalid;
	*pa = pa_off + pg_off;
	return (rest);

invalid:
	_kvm_err(kd, 0, "invalid address (%lx)", va);
	return (0);
}
int
_kvm_initvtop(kvm_t *kd)
{
	_kvm_err(kd, 0, "initvtop not yet implemented!");
	return (0);
}
/*
 * Translate a physical address to a file-offset in the crash dump.
 */
off_t
_kvm_pa2off(kvm_t *kd, paddr_t pa)
{
	cpu_kcore_hdr_t *cpu_kh;
	phys_ram_seg_t *ramsegs;
	off_t off;
	int i;

	cpu_kh = kd->cpu_data;
	ramsegs = (void *)((char *)(void *)cpu_kh + ALIGN(sizeof *cpu_kh));

	off = 0;
	for (i = 0; i < cpu_kh->nmemsegs; i++) {
		if (pa >= ramsegs[i].start &&
		    (pa - ramsegs[i].start) < ramsegs[i].size) {
			off += (pa - ramsegs[i].start);
			break;
		}
		off += ramsegs[i].size;
	}
	if (i == cpu_kh->nmemsegs)
		_kvm_err(kd, 0, "pa %lx not in dump", pa);

	return (kd->dump_off + off);
}
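/*
 * A standalone sketch (separate program, hypothetical segment table and
 * names) of the same sparse-to-packed translation: each RAM segment that
 * does not contain pa contributes its full size to the running file
 * offset, and the segment that does contain pa contributes only the
 * distance from its start.
 */
#include <stdint.h>
#include <stdio.h>

struct seg {
	uint64_t start;		/* first physical address in the segment */
	uint64_t size;		/* bytes of physical memory in the segment */
};

static int64_t
pa2off_sketch(const struct seg *segs, int nsegs, uint64_t pa)
{
	int64_t off = 0;
	int i;

	for (i = 0; i < nsegs; i++) {
		if (pa >= segs[i].start && pa - segs[i].start < segs[i].size)
			return (off + (pa - segs[i].start));
		off += segs[i].size;
	}
	return (-1);		/* pa not covered by any segment */
}

int
main(void)
{
	struct seg segs[] = {
		{ 0x00000000, 0x0a000000 },	/* hypothetical RAM layout */
		{ 0x10000000, 0x06000000 },
	};

	printf("%jd\n", (intmax_t)pa2off_sketch(segs, 2, 0x10001000));
	return (0);
}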
/*
 * Translate a physical address to a file-offset in the crash dump.
 */
off_t
_kvm_pa2off(kvm_t *kd, paddr_t pa)
{
	cpu_kcore_hdr_t *cpup = kd->cpu_data;
	phys_ram_seg_t *mp;
	off_t off;
	int nmem;

	/*
	 * Layout of CPU segment:
	 *	cpu_kcore_hdr_t;
	 *	[alignment]
	 *	phys_ram_seg_t[cpup->nmemseg];
	 */
	mp = (phys_ram_seg_t *)((int)kd->cpu_data + cpup->memsegoffset);
	off = 0;

	/* Translate (sparse) pfnum to (packed) dump offset */
	for (nmem = cpup->nmemseg; --nmem >= 0; mp++) {
		if (mp->start <= pa && pa < mp->start + mp->size)
			break;
		off += mp->size;
	}
	if (nmem < 0) {
		_kvm_err(kd, 0, "invalid address (%x)", pa);
		return (-1);
	}

	return (kd->dump_off + off + pa - mp->start);
}
off_t
_kvm_pa2off(kvm_t *kd, paddr_t pa)
{
	_kvm_err(kd, 0, "pa2off not yet implemented!");
	return (-1);
}
int
_kvm_mdopen(kvm_t *kd)
{
	_kvm_err(kd, 0, "mdopen not yet implemented!");
	return (-1);
}
/*
 * If the current element is the left side of the parent the next element
 * will be a left side traversal of the parent's right side.  If the parent
 * has no right side the next element will be the parent.
 *
 * If the current element is the right side of the parent the next element
 * is the parent.
 *
 * If the parent is NULL we are done.
 */
static uintptr_t
kvm_nextlwp(kvm_t *kd, uintptr_t lwppos, struct lwp *lwp, struct proc *proc)
{
	uintptr_t nextpos;

	nextpos = (uintptr_t)lwp->u.lwp_rbnode.rbe_parent;
	if (nextpos) {
		if (KREAD(kd, nextpos, lwp)) {
			_kvm_err(kd, kd->program, "can't read lwp at %p",
			    (void *)lwppos);
			return ((uintptr_t)-1);
		}
		if (lwppos == (uintptr_t)lwp->u.lwp_rbnode.rbe_left) {
			/*
			 * If we had gone down the left side the next element
			 * is a left hand traversal of the parent's right
			 * side, or the parent itself if there is no right
			 * side.
			 */
			lwppos = (uintptr_t)lwp->u.lwp_rbnode.rbe_right;
			if (lwppos)
				nextpos = kvm_lwptraverse(kd, lwp, lwppos);
		} else {
			/*
			 * If we had gone down the right side the next
			 * element is the parent.
			 */
			/* nextpos = nextpos */
		}
	}
	return (nextpos);
}
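/*
 * A standalone illustration of the stepping rule described in the comment
 * above, on an ordinary in-memory binary tree with hypothetical field
 * names; the library version instead reads each node out of the dump with
 * KREAD() and uses kvm_lwptraverse() for the left-hand descent.
 */
#include <stddef.h>

struct node {
	struct node *left;
	struct node *right;
	struct node *parent;
};

/* Left-hand descent: the first node visited within a subtree. */
static struct node *
descend_left(struct node *n)
{
	while (n->left != NULL)
		n = n->left;
	return (n);
}

static struct node *
next_node(struct node *cur)
{
	struct node *parent = cur->parent;

	if (parent == NULL)
		return (NULL);		/* no parent: traversal is done */
	if (cur == parent->left) {
		/*
		 * Came up from the left side: continue with a left-hand
		 * descent of the parent's right side, or with the parent
		 * itself if it has no right side.
		 */
		if (parent->right != NULL)
			return (descend_left(parent->right));
		return (parent);
	}
	/* Came up from the right side: the parent is next. */
	return (parent);
}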
/*
 * Read from user space. The user context is given by p.
 */
static ssize_t
kvm_ureadm(kvm_t *kd, const struct miniproc *p, u_long uva, char *buf,
    size_t len)
{
	char *cp;

	cp = buf;
	while (len > 0) {
		size_t cc;
		char *dp;
		u_long cnt;

		dp = _kvm_ureadm(kd, p, uva, &cnt);
		if (dp == NULL) {
			_kvm_err(kd, 0, "invalid address (%lx)", uva);
			return (0);
		}
		cc = (size_t)MIN(cnt, len);
		memcpy(cp, dp, cc);
		cp += cc;
		uva += cc;
		len -= cc;
	}

	return (ssize_t)(cp - buf);
}
int
_kvm_kvatop4m(kvm_t *kd, u_long va, u_long *pa)
{
	cpu_kcore_hdr_t *cpup = kd->cpu_data;
	struct regmap *rp;
	struct segmap *sp;
	int vr, vs, pte;
	off_t foff;

	if (va < KERNBASE)
		goto err;

	/*
	 * Layout of CPU segment:
	 *	cpu_kcore_hdr_t;
	 *	[alignment]
	 *	phys_ram_seg_t[cpup->nmemseg];
	 */
	vr = VA_VREG(va);
	vs = VA_VSEG(va);

	sp = &cpup->segmap_store[(vr - NUREG) * NSEGRG + vs];
	if (sp->sg_npte == 0)
		goto err;

	/* XXX - assume page tables in initial kernel DATA or BSS. */
	foff = _kvm_pa2off(kd, (u_long)&sp->sg_pte[VA_VPG(va)] - KERNBASE);
	if (foff == (off_t)-1)
		return (0);

	if (_kvm_pread(kd, kd->pmfd, (void *)&pte, sizeof(pte), foff) < 0) {
		_kvm_err(kd, kd->program, "cannot read pte for %x", va);
		return (0);
	}

	if ((pte & SRMMU_TETYPE) == SRMMU_TEPTE) {
		long p, off = VA_OFF(va);

		p = (pte & SRMMU_PPNMASK) << SRMMU_PPNPASHIFT;
		*pa = p + off;
		return (kd->nbpg - off);
	}

err:
	_kvm_err(kd, 0, "invalid address (%x)", va);
	return (0);
}
/*
 * Translate a kernel virtual address to a physical address.
 */
int
_kvm_kvatop(kvm_t *kd, u_long va, paddr_t *pa)
{
	u_long offset, pte_pa;
	struct vmstate *vm;
	pt_entry_t pte;

	if (!kd->vmst) {
		_kvm_err(kd, 0, "vatop called before initvtop");
		return (0);
	}

	if (ISALIVE(kd)) {
		_kvm_err(kd, 0, "vatop called in live kernel!");
		return (0);
	}

	vm = kd->vmst;
	offset = va & PGOFSET;

	/*
	 * If we are initializing (kernel page table descriptor pointer
	 * not yet set) then return pa == va to avoid infinite recursion.
	 */
	if (vm->PTD == NULL) {
		*pa = va;
		return (NBPG - (int)offset);
	}
	if ((vm->PTD[pdei(va)] & PG_V) == 0)
		goto invalid;

	pte_pa = (vm->PTD[pdei(va)] & PG_FRAME) +
	    (ptei(va) * sizeof(pt_entry_t));

	/* XXX READ PHYSICAL XXX */
	if (_kvm_pread(kd, kd->pmfd, &pte, sizeof pte,
	    (off_t)_kvm_pa2off(kd, pte_pa)) != sizeof pte)
		goto invalid;

	*pa = (pte & PG_FRAME) + offset;
	return (NBPG - (int)offset);

invalid:
	_kvm_err(kd, 0, "invalid address (%lx)", va);
	return (0);
}
int
_kvm_initvtop(kvm_t *kd)
{
	struct sparc64_dump_hdr hdr;
	struct sparc64_dump_reg *regs;
	struct vmstate *vm;
	size_t regsz;
	vm_offset_t pa;

	vm = (struct vmstate *)_kvm_malloc(kd, sizeof(*vm));
	if (vm == NULL) {
		_kvm_err(kd, kd->program, "cannot allocate vm");
		return (-1);
	}
	kd->vmst = vm;

	if (!_kvm_read_phys(kd, 0, &hdr, sizeof(hdr)))
		goto fail_vm;
	pa = hdr.dh_tsb_pa;

	regsz = hdr.dh_nregions * sizeof(*regs);
	regs = _kvm_malloc(kd, regsz);
	if (regs == NULL) {
		_kvm_err(kd, kd->program, "cannot allocate regions");
		goto fail_vm;
	}
	if (!_kvm_read_phys(kd, sizeof(hdr), regs, regsz))
		goto fail_regs;
	qsort(regs, hdr.dh_nregions, sizeof(*regs), _kvm_reg_cmp);

	vm->vm_tsb_mask = hdr.dh_tsb_mask;
	vm->vm_regions = regs;
	vm->vm_nregions = hdr.dh_nregions;
	vm->vm_tsb_off = _kvm_find_off(vm, hdr.dh_tsb_pa, hdr.dh_tsb_size);
	if (vm->vm_tsb_off == KVM_OFF_NOTFOUND) {
		_kvm_err(kd, kd->program, "tsb not found in dump");
		goto fail_regs;
	}
	return (0);

fail_regs:
	free(regs);
fail_vm:
	free(vm);
	return (-1);
}
void
_kvm_freevtop(kvm_t *kd)
{
	if (kd->vmst != NULL) {
		_kvm_err(kd, kd->program, "_kvm_freevtop: internal error");
		kd->vmst = NULL;
	}
}
static int
_aarch64_minidump_vatop(kvm_t *kd, kvaddr_t va, off_t *pa)
{
	struct vmstate *vm;
	aarch64_physaddr_t offset;
	aarch64_pte_t l3;
	kvaddr_t l3_index;
	aarch64_physaddr_t a;
	off_t ofs;

	vm = kd->vmst;
	offset = va & AARCH64_PAGE_MASK;

	if (va >= vm->hdr.dmapbase && va < vm->hdr.dmapend) {
		a = (va - vm->hdr.dmapbase + vm->hdr.dmapphys) &
		    ~AARCH64_PAGE_MASK;
		ofs = _kvm_pt_find(kd, a);
		if (ofs == -1) {
			_kvm_err(kd, kd->program, "_aarch64_minidump_vatop: "
			    "direct map address 0x%jx not in minidump",
			    (uintmax_t)va);
			goto invalid;
		}
		*pa = ofs + offset;
		return (AARCH64_PAGE_SIZE - offset);
	} else if (va >= vm->hdr.kernbase) {
		l3_index = (va - vm->hdr.kernbase) >> AARCH64_L3_SHIFT;
		if (l3_index >= vm->hdr.pmapsize / sizeof(*vm->page_map))
			goto invalid;
		l3 = le64toh(vm->page_map[l3_index]);
		if ((l3 & AARCH64_ATTR_DESCR_MASK) != AARCH64_L3_PAGE) {
			_kvm_err(kd, kd->program,
			    "_aarch64_minidump_vatop: pde not valid");
			goto invalid;
		}
		a = l3 & ~AARCH64_ATTR_MASK;
		ofs = _kvm_pt_find(kd, a);
		if (ofs == -1) {
			_kvm_err(kd, kd->program, "_aarch64_minidump_vatop: "
			    "physical address 0x%jx not in minidump",
			    (uintmax_t)a);
			goto invalid;
		}
		*pa = ofs + offset;
		return (AARCH64_PAGE_SIZE - offset);
	} else {