Example #1
int
_kvm_kvatop(kvm_t *kd, u_long va, off_t *pa)
{
	struct vmstate *vm = kd->vmst;
	pd_entry_t pd;
	pt_entry_t pte;
	u_long pte_pa;
	off_t pte_off;

	if (kd->vmst->minidump)
		return (_kvm_minidump_kvatop(kd, va, pa));

	if (vm->l1pt == NULL)
		return (_kvm_pa2off(kd, va, pa, PAGE_SIZE));
	pd = vm->l1pt[L1_IDX(va)];
	if (!l1pte_valid(pd))
		goto invalid;
	if (l1pte_section_p(pd)) {
		/* 1MB section mapping. */
		*pa = ((u_long)pd & L1_S_ADDR_MASK) + (va & L1_S_OFFSET);
		return (_kvm_pa2off(kd, *pa, pa, L1_S_SIZE));
	}
	pte_pa = (pd & L1_ADDR_MASK) + l2pte_index(va) * sizeof(pte);
	_kvm_pa2off(kd, pte_pa, &pte_off, L1_S_SIZE);
	if (lseek(kd->pmfd, pte_off, SEEK_SET) == -1) {
		_kvm_syserr(kd, kd->program, "_kvm_kvatop: lseek");
		goto invalid;
	}
	if (read(kd->pmfd, &pte, sizeof(pte)) != sizeof(pte)) {
		_kvm_syserr(kd, kd->program, "_kvm_kvatop: read");
		goto invalid;
	}
	if (!l2pte_valid(pte)) {
		goto invalid;
	}
	if ((pte & L2_TYPE_MASK) == L2_TYPE_L) {
		*pa = (pte & L2_L_FRAME) | (va & L2_L_OFFSET);
		return (_kvm_pa2off(kd, *pa, pa, L2_L_SIZE));
	}
	*pa = (pte & L2_S_FRAME) | (va & L2_S_OFFSET);
	return (_kvm_pa2off(kd, *pa, pa, PAGE_SIZE));
invalid:
	_kvm_err(kd, 0, "Invalid address (%lx)", va);
	return (0);
}
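These translators are libkvm internals: a consumer never calls _kvm_kvatop() directly, but goes through kvm_read(), which translates the kernel virtual address and then reads the core file at the resulting offset. A minimal usage sketch against a crash dump; the dump path and the "_hz" symbol (underscore-prefixed, as with "_PTDpaddr" in Example #2) are illustrative assumptions:

#include <fcntl.h>
#include <kvm.h>
#include <limits.h>
#include <nlist.h>
#include <stdio.h>

int
main(void)
{
	char errbuf[_POSIX2_LINE_MAX];
	struct nlist nl[2];
	kvm_t *kd;
	int hz;

	/* Dump path is hypothetical; NULL execfile/swapfile mean defaults. */
	kd = kvm_openfiles(NULL, "/var/crash/vmcore.0", NULL, O_RDONLY,
	    errbuf);
	if (kd == NULL) {
		fprintf(stderr, "kvm_openfiles: %s\n", errbuf);
		return (1);
	}

	nl[0].n_name = "_hz";
	nl[1].n_name = NULL;
	if (kvm_nlist(kd, nl) != 0 ||
	    kvm_read(kd, nl[0].n_value, &hz, sizeof(hz)) != sizeof(hz))
		fprintf(stderr, "kvm: %s\n", kvm_geterr(kd));
	else
		printf("hz = %d\n", hz);

	kvm_close(kd);
	return (0);
}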
Example #2
int
_kvm_initvtop(kvm_t *kd)
{
	struct nlist nl[2];
	struct vmstate *vm;
	u_long pa;

	vm = (struct vmstate *)_kvm_malloc(kd, sizeof(*vm));
	if (vm == NULL)
		return (-1);
	kd->vmst = vm;

	vm->PTD = NULL;

	nl[0].n_name = "_PTDpaddr";
	nl[1].n_name = NULL;

	if (kvm_nlist(kd, nl) != 0) {
		_kvm_err(kd, kd->program, "bad namelist");
		return (-1);
	}

	if (_kvm_pread(kd, kd->pmfd, &pa, sizeof pa,
	    (off_t)_kvm_pa2off(kd, nl[0].n_value - KERNBASE)) != sizeof pa)
		goto invalid;

	vm->PTD = (pd_entry_t *)_kvm_malloc(kd, NBPG);
	if (vm->PTD == NULL)
		return (-1);

	if (_kvm_pread(kd, kd->pmfd, vm->PTD, NBPG,
	    (off_t)_kvm_pa2off(kd, pa)) != NBPG)
		goto invalid;

	return (0);

invalid:
	if (vm->PTD != NULL) {
		free(vm->PTD);
		vm->PTD = NULL;
	}
	return (-1);
}
Example #3
static int
_arm_kvatop(kvm_t *kd, kvaddr_t va, off_t *pa)
{
	struct vmstate *vm = kd->vmst;
	arm_pd_entry_t pd;
	arm_pt_entry_t pte;
	arm_physaddr_t pte_pa;
	off_t pte_off;

	if (vm->l1pt == NULL)
		return (_kvm_pa2off(kd, va, pa, ARM_PAGE_SIZE));
	pd = _kvm32toh(kd, vm->l1pt[ARM_L1_IDX(va)]);
	if (!l1pte_valid(pd))
		goto invalid;
	if (l1pte_section_p(pd)) {
		/* 1MB section mapping. */
		*pa = (pd & ARM_L1_S_ADDR_MASK) + (va & ARM_L1_S_OFFSET);
		return (_kvm_pa2off(kd, *pa, pa, ARM_L1_S_SIZE));
	}
	pte_pa = (pd & ARM_L1_C_ADDR_MASK) + l2pte_index(va) * sizeof(pte);
	_kvm_pa2off(kd, pte_pa, &pte_off, ARM_L1_S_SIZE);
	if (pread(kd->pmfd, &pte, sizeof(pte), pte_off) != sizeof(pte)) {
		_kvm_syserr(kd, kd->program, "_arm_kvatop: pread");
		goto invalid;
	}
	pte = _kvm32toh(kd, pte);
	if (!l2pte_valid(pte)) {
		goto invalid;
	}
	if ((pte & ARM_L2_TYPE_MASK) == ARM_L2_TYPE_L) {
		*pa = (pte & ARM_L2_L_FRAME) | (va & ARM_L2_L_OFFSET);
		return (_kvm_pa2off(kd, *pa, pa, ARM_L2_L_SIZE));
	}
	*pa = (pte & ARM_L2_S_FRAME) | (va & ARM_L2_S_OFFSET);
	return (_kvm_pa2off(kd, *pa, pa, ARM_PAGE_SIZE));
invalid:
	_kvm_err(kd, 0, "Invalid address (%jx)", (uintmax_t)va);
	return (0);
}
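Example #3 is the FreeBSD cross-debugging descendant of Example #1: the core file may come from a machine of either byte order, so every 32-bit descriptor is run through _kvm32toh() before its bits are tested. A sketch of such a helper, assuming the handle keeps the dump's ELF header in kd->nlehdr the way FreeBSD's kvm_private.h does; le32toh()/be32toh() come from <sys/endian.h> and EI_DATA/ELFDATA2LSB from the ELF headers:

/*
 * Sketch: convert a 32-bit value from core-file byte order to host
 * order, assuming kd->nlehdr holds the ELF header of the dump.
 */
static uint32_t
_kvm32toh(kvm_t *kd, uint32_t val)
{
	if (kd->nlehdr.e_ident[EI_DATA] == ELFDATA2LSB)
		return (le32toh(val));
	return (be32toh(val));
}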
Example #4
/*
 * Translate a kernel virtual address to a physical address.
 */
int
_kvm_kvatop(kvm_t *kd, u_long va, paddr_t *pa)
{
	u_long offset, pte_pa;
	struct vmstate *vm;
	pt_entry_t pte;

	if (!kd->vmst) {
		_kvm_err(kd, 0, "vatop called before initvtop");
		return (0);
	}

	if (ISALIVE(kd)) {
		_kvm_err(kd, 0, "vatop called in live kernel!");
		return (0);
	}

	vm = kd->vmst;
	offset = va & PGOFSET;

	/*
	 * If we are initializing (kernel page table descriptor pointer
	 * not yet set), then return pa == va to avoid infinite recursion.
	 */
	if (vm->PTD == NULL) {
		*pa = va;
		return (NBPG - (int)offset);
	}
	if ((vm->PTD[pdei(va)] & PG_V) == 0)
		goto invalid;

	pte_pa = (vm->PTD[pdei(va)] & PG_FRAME) +
	    (ptei(va) * sizeof(pt_entry_t));

	/* XXX READ PHYSICAL XXX */
	if (_kvm_pread(kd, kd->pmfd, &pte, sizeof pte,
	    (off_t)_kvm_pa2off(kd, pte_pa)) != sizeof pte)
		goto invalid;

	*pa = (pte & PG_FRAME) + offset;
	return (NBPG - (int)offset);

invalid:
	_kvm_err(kd, 0, "invalid address (%lx)", va);
	return (0);
}
Example #5
int
_kvm_kvatop4m(kvm_t *kd, u_long va, u_long *pa)
{
	cpu_kcore_hdr_t *cpup = kd->cpu_data;
	struct segmap *sp;
	int vr, vs, pte;
	off_t foff;

	if (va < KERNBASE)
		goto err;

	/*
	 * Layout of CPU segment:
	 *	cpu_kcore_hdr_t;
	 *	[alignment]
	 *	phys_ram_seg_t[cpup->nmemseg];
	 */
	vr = VA_VREG(va);
	vs = VA_VSEG(va);

	sp = &cpup->segmap_store[(vr-NUREG)*NSEGRG + vs];
	if (sp->sg_npte == 0)
		goto err;

	/* XXX - assume page tables in initial kernel DATA or BSS. */
	foff = _kvm_pa2off(kd, (u_long)&sp->sg_pte[VA_VPG(va)] - KERNBASE);
	if (foff == (off_t)-1)
		return (0);

	if (_kvm_pread(kd, kd->pmfd, (void *)&pte, sizeof(pte),
	    foff) != sizeof(pte)) {
		_kvm_err(kd, kd->program, "cannot read pte for %lx", va);
		return (0);
	}

	if ((pte & SRMMU_TETYPE) == SRMMU_TEPTE) {
		long p, off = VA_OFF(va);

		p = (pte & SRMMU_PPNMASK) << SRMMU_PPNPASHIFT;
		*pa = p + off;
		return (kd->nbpg - off);
	}
err:
	_kvm_err(kd, 0, "invalid address (%lx)", va);
	return (0);
}
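For reference, the decomposition behind VA_VREG(), VA_VSEG() and VA_VPG(): the sun4m SRMMU splits the 4 GB address space into 256 regions of 16 MB, each region into 64 segments of 256 KB, and each segment into 64 pages of 4 KB. A sketch of the macros under those assumptions; the authoritative definitions live in the machine headers:

/*
 * Illustrative sun4m VA decomposition, assuming 16 MB regions,
 * 256 KB segments and 4 KB pages; see <machine/pmap.h> for the
 * real macros.
 */
#define VA_VREG(va)	(((u_int)(va) >> 24) & 0xff)	/* region index */
#define VA_VSEG(va)	(((u_int)(va) >> 18) & 0x3f)	/* segment in region */
#define VA_VPG(va)	(((u_int)(va) >> 12) & 0x3f)	/* page in segment */
#define VA_OFF(va)	((u_int)(va) & 0xfff)		/* byte in page */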
Example #6
/*
 * Translate a kernel virtual address to a physical address.
 */
int
_kvm_kvatop(kvm_t *kd, u_long va, paddr_t *pa)
{
	cpu_kcore_hdr_t *cpu_kh;
	paddr_t pde_pa, pte_pa;
	u_long page_off;
	pd_entry_t pde;
	pt_entry_t pte;

	if (ISALIVE(kd)) {
		_kvm_err(kd, 0, "vatop called in live kernel!");
		return (0);
	}

	page_off = va & (kd->nbpg - 1);

	if (va >= PMAP_DIRECT_BASE && va <= PMAP_DIRECT_END) {
		*pa = va - PMAP_DIRECT_BASE;
		return (int)(kd->nbpg - page_off);
	}

	cpu_kh = kd->cpu_data;

	/*
	 * Find and read all entries to get to the pa.
	 */

	/*
	 * Level 4.
	 */
	pde_pa = cpu_kh->ptdpaddr + (pl4_pi(va) * sizeof(pd_entry_t));
	if (pread(kd->pmfd, (void *)&pde, sizeof(pde),
	    _kvm_pa2off(kd, pde_pa)) != sizeof(pde)) {
		_kvm_syserr(kd, 0, "could not read PT level 4 entry");
		goto lose;
	}
	if ((pde & PG_V) == 0) {
		_kvm_err(kd, 0, "invalid translation (invalid level 4 PDE)");
		goto lose;
	}

	/*
	 * Level 3.
	 */
	pde_pa = (pde & PG_FRAME) + (pl3_pi(va) * sizeof(pd_entry_t));
	if (pread(kd->pmfd, (void *)&pde, sizeof(pde),
	    _kvm_pa2off(kd, pde_pa)) != sizeof(pde)) {
		_kvm_syserr(kd, 0, "could not read PT level 3 entry");
		goto lose;
	}
	if ((pde & PG_V) == 0) {
		_kvm_err(kd, 0, "invalid translation (invalid level 3 PDE)");
		goto lose;
	}

	/*
	 * Level 2.
	 */
	pde_pa = (pde & PG_FRAME) + (pl2_pi(va) * sizeof(pd_entry_t));
	if (pread(kd->pmfd, (void *)&pde, sizeof(pde),
	    _kvm_pa2off(kd, pde_pa)) != sizeof(pde)) {
		_kvm_syserr(kd, 0, "could not read PT level 2 entry");
		goto lose;
	}
	if ((pde & PG_V) == 0) {
		_kvm_err(kd, 0, "invalid translation (invalid level 2 PDE)");
		goto lose;
	}

	/*
	 * Might be a large page.
	 */
	if ((pde & PG_PS) != 0) {
		page_off = va & (NBPD_L2 - 1);
		*pa = (pde & PG_LGFRAME) | page_off;
		return (int)(NBPD_L2 - page_off);
	}

	/*
	 * Level 1.
	 */
	pte_pa = (pde & PG_FRAME) + (pl1_pi(va) * sizeof(pt_entry_t));
	if (pread(kd->pmfd, (void *) &pte, sizeof(pte),
	    _kvm_pa2off(kd, pte_pa)) != sizeof(pte)) {
		_kvm_syserr(kd, 0, "could not read PTE");
		goto lose;
	}
	/*
	 * Validate the PTE and return the physical address.
	 */
	if ((pte & PG_V) == 0) {
		_kvm_err(kd, 0, "invalid translation (invalid PTE)");
		goto lose;
	}
	*pa = (pte & PG_FRAME) + page_off;
	return (int)(kd->nbpg - page_off);

 lose:
	*pa = (u_long)~0L;
	return (0);
}
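The level-4/3/2/1 blocks above repeat a single step: compute the entry's physical address from the parent table's frame plus the per-level index, read the entry, and test PG_V. Condensed into a loop, with a hypothetical pl_pi(va, level) standing in for pl4_pi()..pl1_pi(), and large-page handling at level 2 omitted:

/*
 * Sketch of the four-level walk as a loop.  pl_pi() is a
 * hypothetical stand-in for pl4_pi()..pl1_pi(); pd_entry_t and
 * pt_entry_t are the same width here, so pd_entry_t is used
 * throughout.  Returns 0 and the final frame in *frame, or -1
 * on a read error or invalid entry.
 */
static int
walk_levels(kvm_t *kd, cpu_kcore_hdr_t *cpu_kh, u_long va, paddr_t *frame)
{
	pd_entry_t pde;
	paddr_t pa = cpu_kh->ptdpaddr;
	int level;

	for (level = 4; level >= 1; level--) {
		pa = (pa & PG_FRAME) + pl_pi(va, level) * sizeof(pde);
		if (pread(kd->pmfd, (void *)&pde, sizeof(pde),
		    _kvm_pa2off(kd, pa)) != sizeof(pde))
			return (-1);		/* read error */
		if ((pde & PG_V) == 0)
			return (-1);		/* invalid entry */
		pa = pde & PG_FRAME;
	}
	*frame = pa;
	return (0);
}

The caller would then OR in page_off, exactly as the unrolled version does.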
Example #7
int
_kvm_kvatop(kvm_t *kd, u_long va, paddr_t *pa)
{
	cpu_kcore_hdr_t *cpu_kh;
	struct vmstate *vm;
	int rv, page_off;
	alpha_pt_entry_t pte;
	off_t pteoff;

	if (!kd->vmst) {
		_kvm_err(kd, 0, "vatop called before initvtop");
		return (0);
	}

	if (ISALIVE(kd)) {
		_kvm_err(kd, 0, "vatop called in live kernel!");
		return (0);
	}

	cpu_kh = kd->cpu_data;
	vm = kd->vmst;
	page_off = va & (cpu_kh->page_size - 1);

#ifndef PAGE_SHIFT
#define	PAGE_SHIFT      vm->page_shift
#endif

	if (va >= ALPHA_K0SEG_BASE && va <= ALPHA_K0SEG_END) {
		/*
		 * Direct-mapped address: just convert it.
		 */

		*pa = ALPHA_K0SEG_TO_PHYS(va);
		rv = cpu_kh->page_size - page_off;
	} else if (va >= ALPHA_K1SEG_BASE && va <= ALPHA_K1SEG_END) {
		/*
		 * Real kernel virtual address: do the translation.
		 */

		/* Find and read the L1 PTE. */
		pteoff = cpu_kh->lev1map_pa +
		    l1pte_index(va) * sizeof(alpha_pt_entry_t);
		if (_kvm_pread(kd, kd->pmfd, (char *)&pte, sizeof(pte),
		    (off_t)_kvm_pa2off(kd, pteoff)) != sizeof(pte)) {
			_kvm_syserr(kd, 0, "could not read L1 PTE");
			goto lose;
		}

		/* Find and read the L2 PTE. */
		if ((pte & ALPHA_PTE_VALID) == 0) {
			_kvm_err(kd, 0, "invalid translation (invalid L1 PTE)");
			goto lose;
		}
		pteoff = ALPHA_PTE_TO_PFN(pte) * cpu_kh->page_size +
		    l2pte_index(va) * sizeof(alpha_pt_entry_t);
		if (_kvm_pread(kd, kd->pmfd, (char *)&pte, sizeof(pte),
		    (off_t)_kvm_pa2off(kd, pteoff)) != sizeof(pte)) {
			_kvm_syserr(kd, 0, "could not read L2 PTE");
			goto lose;
		}

		/* Find and read the L3 PTE. */
		if ((pte & ALPHA_PTE_VALID) == 0) {
			_kvm_err(kd, 0, "invalid translation (invalid L2 PTE)");
			goto lose;
		}
		pteoff = ALPHA_PTE_TO_PFN(pte) * cpu_kh->page_size +
		    l3pte_index(va) * sizeof(alpha_pt_entry_t);
		if (_kvm_pread(kd, kd->pmfd, (char *)&pte, sizeof(pte),
		    (off_t)_kvm_pa2off(kd, pteoff)) != sizeof(pte)) {
			_kvm_syserr(kd, 0, "could not read L3 PTE");
			goto lose;
		}

		/* Fill in the PA. */
		if ((pte & ALPHA_PTE_VALID) == 0) {
			_kvm_err(kd, 0, "invalid translation (invalid L3 PTE)");
			goto lose;
		}
		*pa = ALPHA_PTE_TO_PFN(pte) * cpu_kh->page_size + page_off;
		rv = cpu_kh->page_size - page_off;
	} else {
		/*
		 * Bogus address (not in KV space): punt.
		 */

		_kvm_err(kd, 0, "invalid kernel virtual address");
lose:
		*pa = -1;
		rv = 0;
	}

	return (rv);
}
Example #8
/*
 * Used to translate a virtual address to a physical address for systems
 * running under PAE mode. Three levels of virtual memory pages are handled
 * here: the per-CPU L3 page, the 4 L2 PDs and the PTs.
 */
int
_kvm_kvatop_i386pae(kvm_t *kd, vaddr_t va, paddr_t *pa)
{
	cpu_kcore_hdr_t *cpu_kh;
	u_long page_off;
	pd_entry_t pde;
	pt_entry_t pte;
	paddr_t pde_pa, pte_pa;

	cpu_kh = kd->cpu_data;
	page_off = va & PGOFSET;

	/*
	 * Find and read the PDE. Ignore the L3, as it is only a per-CPU
	 * page, not needed for kernel VA => PA translations.
	 * Remember that the 4 L2 pages are contiguous, so it is safe
	 * to increment pdppaddr to compute the address of the PDE.
	 * pdppaddr being PAGE_SIZE aligned, we mask the option bits.
	 */
	pde_pa = (cpu_kh->pdppaddr & PG_FRAME) + (pl2_pi(va) * sizeof(pde));
	if (_kvm_pread(kd, kd->pmfd, (void *)&pde, sizeof(pde),
	    _kvm_pa2off(kd, pde_pa)) != sizeof(pde)) {
		_kvm_syserr(kd, 0, "could not read PDE");
		goto lose;
	}

	/*
	 * Find and read the page table entry.
	 */
	if ((pde & PG_V) == 0) {
		_kvm_err(kd, 0, "invalid translation (invalid PDE)");
		goto lose;
	}
	if ((pde & PG_PS) != 0) {
		/*
		 * This is a 2MB page.
		 */
		page_off = va & ((vaddr_t)~PG_LGFRAME);
		*pa = (pde & PG_LGFRAME) + page_off;
		return (int)(NBPD_L2 - page_off);
	}

	pte_pa = (pde & PG_FRAME) + (pl1_pi(va) * sizeof(pt_entry_t));
	if (_kvm_pread(kd, kd->pmfd, (void *) &pte, sizeof(pte),
	    _kvm_pa2off(kd, pte_pa)) != sizeof(pte)) {
		_kvm_syserr(kd, 0, "could not read PTE");
		goto lose;
	}

	/*
	 * Validate the PTE and return the physical address.
	 */
	if ((pte & PG_V) == 0) {
		_kvm_err(kd, 0, "invalid translation (invalid PTE)");
		goto lose;
	}
	*pa = (pte & PG_FRAME) + page_off;
	return (int)(NBPG - page_off);

lose:
	*pa = (paddr_t)~0L;
	return (0);
}
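The comment above relies on the 4 L2 pages being physically contiguous, which lets pl2_pi() index the four directories as a single 2048-entry array: under i386 PAE a 32-bit VA carries a 2-bit L3 index, a 9-bit L2 index and a 9-bit L1 index, so the combined L2 index is simply bits 21..31. A sketch of how such index macros could look; the real definitions live in the machine headers:

/*
 * Illustrative PAE index macros, assuming 4 KB pages, 512-entry
 * tables and the 4 contiguous L2 pages treated as one directory;
 * see <machine/pte.h> for the real ones.
 */
#define PAE_L1_SHIFT	12			/* 4 KB pages */
#define PAE_L2_SHIFT	21			/* 2 MB per PDE */
#define pl1_pi(va)	(((va) >> PAE_L1_SHIFT) & 511)	/* PTE within PT */
#define pl2_pi(va)	(((va) >> PAE_L2_SHIFT) & 2047)	/* PDE within 4-page PD */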