Code example #1
File: vmm.c Project: plurmiscuous/JinxOS
void
init_vmm(void) {
    extern char bstack[];

    // allocate a page directory for the kernel
    kpd = kpalloc();

    // map all of physical memory at KADDR, and the 32 KiB boot stack just below it
    boot_map(kpd, (void*) KADDR, 0, npages * PG_SIZE, true, false);
    boot_map(kpd, (void*) (KADDR - BIT(15)), PADDR(bstack), BIT(15), true, false);

    // load the physical address of the kernel page directory into CR3
    lcr3(PADDR(kpd));

    // turn on paging and related protections: set the protection, paging,
    // alignment-mask, kernel write-protect, native FPU error and coprocessor
    // monitoring bits; clear the task-switched and FPU-emulation bits
    uint32_t cr0 = rcr0();
    cr0 |= CR0_PE | CR0_PG | CR0_AM | CR0_WP | CR0_NE | CR0_MP;
    cr0 &= ~(CR0_TS | CR0_EM);
    lcr0(cr0);

    // from here on, access the multiboot info through its kernel virtual address
    mbi = VADDR(mbi);

    // install the page fault handler
    isr_install_handler(ISR_PGFLT, isr_pgfault);
}
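boot_map itself is not shown in this example. As a rough sketch of what a boot_map-style routine does, the loop below walks the requested range one page at a time and writes the leaf page-table entries; the helper name pgdir_walk and the pde_t/pte_t/PTE_* names are assumptions borrowed from common teaching kernels, not JinxOS's actual API.

// Hypothetical sketch only: map `size` bytes of physical memory starting at
// `pa` to the virtual range starting at `va`, one PG_SIZE page at a time.
static void
boot_map_sketch(pde_t *pd, void *va, uint32_t pa, uint32_t size,
                bool writable, bool user)
{
    uint32_t off;

    for (off = 0; off < size; off += PG_SIZE) {
        // pgdir_walk (assumed helper) returns the PTE slot for va + off,
        // allocating the intermediate page table when it is missing
        pte_t *pte = pgdir_walk(pd, (char*) va + off, true);
        *pte = (pa + off) | PTE_P
                          | (writable ? PTE_W : 0)
                          | (user ? PTE_U : 0);
    }
}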
Code example #2
File: map.c Project: spectrec/ann
pte_t *mmap_lookup(pml4e_t *pml4, uint64_t va, bool create)
{
	struct page *page4pdp = NULL, *page4pd = NULL, *page4pt = NULL;
	pml4e_t pml4e = pml4[PML4_IDX(va)];

	if ((pml4e & PML4E_P) != 0)
		goto pml4e_found;
	if (create == false)
		return NULL;

	// Prepare new page directory pointer
	if ((page4pdp = page_alloc()) == NULL)
		return NULL;
	memset(page2kva(page4pdp), 0, PAGE_SIZE);
	page4pdp->ref = 1;

	// Insert new pdp into PML4
	pml4e = pml4[PML4_IDX(va)] = page2pa(page4pdp) | PML4E_P | PML4E_W | PML4E_U;

pml4e_found:
	assert((pml4e & PML4E_P) != 0);

	pdpe_t *pdp = VADDR(PML4E_ADDR(pml4e));
	pdpe_t pdpe = pdp[PDP_IDX(va)];

	if ((pdpe & PDPE_P) != 0)
		goto pdpe_found;
	if (create == false)
		return NULL;

	// Prepare new page directory
	if ((page4pd = page_alloc()) == NULL)
		return NULL;
	memset(page2kva(page4pd), 0, PAGE_SIZE);
	page4pd->ref = 1;

	// Insert new page directory into page directory pointer table
	pdpe = pdp[PDP_IDX(va)] = page2pa(page4pd) | PDPE_P | PDPE_W | PDPE_U;

pdpe_found:
	assert((pdpe & PDPE_P) != 0);

	pde_t *pd = VADDR(PDPE_ADDR(pdpe));
	pde_t pde = pd[PD_IDX(va)];

	if ((pde & PDE_P) != 0)
		goto pde_found;
	if (create == false)
		return NULL;

	// Prepare new page table
	if ((page4pt = page_alloc()) == NULL)
		return NULL;
	memset(page2kva(page4pt), 0, PAGE_SIZE);
	page4pt->ref = 1;

	// Insert new page table into page directory
	pde = pd[PD_IDX(va)] = page2pa(page4pt) | PDE_P | PTE_W | PDE_U;

pde_found:
	assert((pde & PDE_P) != 0);

	pte_t *pt = VADDR(PDE_ADDR(pde));

	return &pt[PT_IDX(va)];
}
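Because mmap_lookup returns a pointer to the leaf PTE slot (allocating any missing intermediate tables when create is true), a caller installs a mapping by writing through that pointer. A minimal sketch, assuming a PTE_P present bit alongside the PTE_W flag already used above; the helper name page_insert_sketch and the perm parameter are hypothetical, not part of the project shown.

// Hypothetical caller: map the physical page `pp` at virtual address `va`
// with the extra permission bits given in `perm`.
static int page_insert_sketch(pml4e_t *pml4, struct page *pp, uint64_t va, uint64_t perm)
{
	pte_t *pte = mmap_lookup(pml4, va, true);

	if (pte == NULL)
		return -1;	// no memory for an intermediate table

	pp->ref++;		// the new PTE now references this page
	*pte = page2pa(pp) | PTE_P | perm;

	return 0;
}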
Code example #3
File: map.c Project: spectrec/ann
void *page2kva(struct page *p)
{
	return VADDR(page2pa(p));
}
Code example #4
File: mem.c Project: Alkzndr/freebsd
/* ARGSUSED */
int
memrw(struct cdev *dev, struct uio *uio, int flags)
{
	int o;
	u_int c = 0;
	vm_paddr_t pa;
	struct iovec *iov;
	int error = 0;
	vm_offset_t addr;

	/* XXX UPS Why ? */
	GIANT_REQUIRED;


	if (dev2unit(dev) != CDEV_MINOR_MEM && dev2unit(dev) != CDEV_MINOR_KMEM)
		return EIO;

	if (dev2unit(dev) == CDEV_MINOR_KMEM && uio->uio_resid > 0) {
		if (uio->uio_offset < (vm_offset_t)VADDR(PTDPTDI, 0))
				return (EFAULT);

		if (!kernacc((caddr_t)(int)uio->uio_offset, uio->uio_resid,
		    uio->uio_rw == UIO_READ ?  VM_PROT_READ : VM_PROT_WRITE))
			return (EFAULT);
	}

	while (uio->uio_resid > 0 && error == 0) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			if (uio->uio_iovcnt < 0)
				panic("memrw");
			continue;
		}
		if (dev2unit(dev) == CDEV_MINOR_MEM) {
			pa = uio->uio_offset;
			pa &= ~PAGE_MASK;
		} else {
			/*
			 * Extract the physical page since the mapping may
			 * change at any time. This avoids panics on page 
			 * fault in this case but will cause reading/writing
			 * to the wrong page.
			 * Hopefully an application will notice the wrong
			 * data on read access and refrain from writing.
			 * This should be replaced by a special uiomove
			 * type function that just returns an error if there
			 * is a page fault on a kernel page. 
			 */
			addr = trunc_page(uio->uio_offset);
			pa = pmap_extract(kernel_pmap, addr);
			if (pa == 0) 
				return EFAULT;

		}
		
		/* 
		 * XXX UPS This should just use sf_buf_alloc.
		 * Unfortunately sf_buf_alloc needs a vm_page
		 * and we may want to look at memory not covered
		 * by the page array.
		 */

		sx_xlock(&memsxlock);
		pmap_kenter((vm_offset_t)ptvmmap, pa);
		pmap_invalidate_page(kernel_pmap,(vm_offset_t)ptvmmap);

		o = (int)uio->uio_offset & PAGE_MASK;
		c = PAGE_SIZE - o;
		c = min(c, (u_int)iov->iov_len);
		error = uiomove((caddr_t)&ptvmmap[o], (int)c, uio);
		pmap_qremove((vm_offset_t)ptvmmap, 1);
		sx_xunlock(&memsxlock);
		
	}

	return (error);
}
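This handler backs both /dev/mem and /dev/kmem, so the path above can be exercised from userland with ordinary file I/O. A minimal sketch; the physical offset 0x1000 is only a placeholder, and reading /dev/mem normally requires root privileges.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	unsigned char buf[16];
	int fd = open("/dev/mem", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/mem");
		return 1;
	}
	/* read 16 bytes starting at physical address 0x1000 (placeholder) */
	if (pread(fd, buf, sizeof(buf), 0x1000) != (ssize_t)sizeof(buf)) {
		perror("pread");
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}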
Code example #5
static dump_struct dump_para;
struct compare_group *cmp_group = NULL;
struct write_group *wt_group = NULL;
/* for sunxi-reg misc driver */
static dump_struct misc_dump_para;
struct compare_group *misc_cmp_group = NULL;
struct write_group *misc_wt_group = NULL;

#define VADDR(x)	((x) - PLAT_PHYS_OFFSET + PAGE_OFFSET)

u32 addr_table[][2] = {
	{AW_IO_PHYS_BASE,		AW_IO_PHYS_BASE + AW_IO_SIZE		},
	{PLAT_PHYS_OFFSET,		PLAT_PHYS_OFFSET + SZ_1G		},

	{(u32)IO_ADDRESS(AW_IO_PHYS_BASE),	(u32)(IO_ADDRESS(AW_IO_PHYS_BASE) + AW_IO_SIZE)	},
	{VADDR(PLAT_PHYS_OFFSET), 	VADDR(PLAT_PHYS_OFFSET) + SZ_1G	-1	}, /* -1 to avoid overflow */
};

/**
 * __addr_valid - check whether an address is valid
 * @addr: the address to check
 *
 * Return true if the address falls within one of the known register or
 * memory ranges, false otherwise.
 */
bool __addr_valid(u32 addr)
{
	int i;
	for (i = 0; i < ARRAY_SIZE(addr_table); i++)
		if (addr >= addr_table[i][0] && addr < addr_table[i][1])
			return true;
	return false;