Example 1
File: ept.c Project: HideSand/ksm
uintptr_t *ept_pte(struct ept *ept, uintptr_t *pml, uintptr_t phys)
{
	uintptr_t *pxe = page_addr(&pml[__pxe_idx(phys)]);
	uintptr_t *ppe = page_addr(&pxe[__ppe_idx(phys)]);
	uintptr_t *pde = page_addr(&ppe[__pde_idx(phys)]);
	return &pde[__pte_idx(phys)];
}
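For reference, a minimal sketch of the index arithmetic that macros like __pxe_idx/__ppe_idx/__pde_idx/__pte_idx typically perform, assuming the standard x86-64/EPT four-level layout (nine index bits per level, 4 KB pages); ksm's actual definitions may differ:

#include <stdio.h>
#include <stdint.h>

/* Hypothetical stand-in for the four index macros, assuming the
 * conventional 9-bit-per-level split of a 48-bit physical address. */
#define IDX(phys, shift) (((phys) >> (shift)) & 0x1ffULL)

int main(void)
{
	uint64_t phys = 0x12345678000ULL;
	printf("pml4 %llu pdpt %llu pd %llu pt %llu\n",
	       (unsigned long long)IDX(phys, 39),  /* PML4 index, bits 47:39 */
	       (unsigned long long)IDX(phys, 30),  /* PDPT index, bits 38:30 */
	       (unsigned long long)IDX(phys, 21),  /* PD index,   bits 29:21 */
	       (unsigned long long)IDX(phys, 12)); /* PT index,   bits 20:12 */
	return 0;
}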
Example 2
File: ept.c Project: HideSand/ksm
uintptr_t *ept_alloc_page(struct ept *ept, uintptr_t *pml4, uint8_t access, uintptr_t phys)
{
	/* PML4 (512 GB) */
	uintptr_t *pml4e = &pml4[__pxe_idx(phys)];
	uintptr_t *pdpt = page_addr(pml4e);

	if (!*pml4e) {
		pdpt = ept_alloc_entry(ept);
		if (!pdpt)
			return NULL;

		ept_init_entry(pml4e, EPT_ACCESS_ALL, __pa(pdpt));
	}

	/* PDPT (1 GB)  */
	uintptr_t *pdpte = &pdpt[__ppe_idx(phys)];
	uintptr_t *pdt = page_addr(pdpte);
	if (!*pdpte) {
		pdt = ept_alloc_entry(ept);
		if (!pdt)
			return NULL;

		ept_init_entry(pdpte, EPT_ACCESS_ALL, __pa(pdt));
	}

	/* PDT (2 MB)  */
	uintptr_t *pdte = &pdt[__pde_idx(phys)];
	uintptr_t *pt = page_addr(pdte);
	if (!*pdte) {
		pt = ept_alloc_entry(ept);
		if (!pt)
			return NULL;

		ept_init_entry(pdte, EPT_ACCESS_ALL, __pa(pt));
	}

	/* PT (4 KB)  */
	uintptr_t *page = &pt[__pte_idx(phys)];
	ept_init_entry(page, access, phys);

	*page |= EPT_MT_WRITEBACK << VMX_EPT_MT_EPTE_SHIFT;
#ifdef EPT_SUPPRESS_VE
	*page |= EPT_SUPPRESS_VE_BIT;
#endif
	return page;
}
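The final OR composes the leaf-entry memory-type bits. A standalone sketch of that bit arithmetic, assuming the architectural encoding from the Intel SDM (memory type in bits 5:3 of a leaf EPT entry, writeback = 6, suppress-#VE = bit 63) rather than ksm's own constants:

#include <stdio.h>
#include <stdint.h>

/* Assumed architectural values (Intel SDM), not taken from ksm. */
#define MT_WRITEBACK    6ULL        /* EPT memory type: writeback     */
#define MT_EPTE_SHIFT   3           /* memory-type field is bits 5:3  */
#define SUPPRESS_VE     (1ULL << 63)

int main(void)
{
	uint64_t epte = 0x1000ULL | 0x7ULL;    /* frame base + RWX access bits */
	epte |= MT_WRITEBACK << MT_EPTE_SHIFT; /* cache the page as writeback  */
	epte |= SUPPRESS_VE;                   /* no #VE exceptions for it     */
	printf("epte = 0x%016llx\n", (unsigned long long)epte);
	return 0;
}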
Example 3
//---------------------------------------------------------------------------
IMPLEMENTATION[!ppc32]:

PUBLIC
Address
Pdir::virt_to_phys(Address virt) const
{
  Virt_addr va(virt);
  auto i = walk(va);
  if (!i.is_valid())
    return Invalid_address;

  return i.page_addr() | cxx::get_lsb(virt, i.page_order());
}
PUBLIC
bool
Vm_vmx_ept::v_lookup(Mem_space::Vaddr virt, Mem_space::Phys_addr *phys,
                     Mem_space::Page_order *order, Mem_space::Attr *page_attribs)
{
  auto i = _ept->walk(virt);
  if (order) *order = Mem_space::Page_order(i.page_order());

  if (!i.is_valid())
    return false;

  // FIXME: this may break on 32-bit systems using more than 4G of RAM
  if (phys) *phys = Mem_space::Phys_addr(i.page_addr());
  if (page_attribs) *page_attribs = i.attribs();

  return true;
}
Example 5
static void handle_bio(struct bio *bio)
{
	if (bio->map.length % IDE_SECTOR_SIZE) {
		finish_bio(bio, BIO_FAILED);
		return;
	}

	char *page = page_addr(bio->map.page);
	const unsigned long long lba = bio->map.sector;
	const size_t count = bio->map.length / IDE_SECTOR_SIZE;
	void *data = page + bio->map.offset;
	const int rc = bio->dir == BIO_READ
				? read_sectors(data, lba, count)
				: write_sectors(data, lba, count);

	finish_bio(bio, rc == 0 ? BIO_FINISHED : BIO_FAILED);
}
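A quick standalone check of the alignment test and sector arithmetic the handler relies on, assuming the conventional ATA sector size of 512 bytes for IDE_SECTOR_SIZE:

#include <stdio.h>
#include <stddef.h>

#define IDE_SECTOR_SIZE 512  /* assumed: the conventional ATA sector size */

int main(void)
{
	size_t length = 4096;
	if (length % IDE_SECTOR_SIZE) {  /* unaligned request: would fail the bio */
		puts("unaligned length");
		return 1;
	}
	printf("transfer covers %zu sectors\n", length / IDE_SECTOR_SIZE);
	return 0;
}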
Example 6
/*
** resolve pte (allocate pt if needed) for a non-large pde
** returns 0 if the pde maps a large page
*/
static inline
npg_pte64_t* __npg_resolve_pte(npg_pde64_t *pde, offset_t addr, uint64_t attr)
{
   npg_pte64_t *pt;

   if(!npg_present(pde))
   {
      pt = __npg_new_pt();
      /* upper-level entry has full pvl */
      npg_set_entry(pde, attr|npg_dft_pvl, page_nr(pt));
   }
   else if(npg_large(pde))
      return 0;
   else
      pt = (npg_pte64_t*)page_addr(pde->addr);

   return &pt[pt64_idx(addr)];
}
Example 7
/*
** resolve pde (allocate pd if needed) for a non-large pdpe
** returns 0 if the pdpe maps a large page
*/
static inline
npg_pde64_t* __npg_resolve_pde(npg_pdpe_t *pdpe, offset_t addr, uint64_t attr)
{
   npg_pde64_t *pd;

   if(!npg_present(pdpe))
   {
      pd = __npg_new_pd();
      /* upper-level entry has full pvl */
      npg_set_entry(pdpe, attr|npg_dft_pvl, page_nr(pd));
   }
   else if(npg_large(pdpe))
      return 0;
   else
      pd = (npg_pde64_t*)page_addr(pdpe->addr);

   return &pd[pd64_idx(addr)];
}
Example 8
/*
** resolve pdpe (allocate pdp if needed)
*/
static inline npg_pdpe_t* __npg_resolve_pdpe(offset_t addr, uint64_t attr)
{
   vm_pgmem_t  *pg = npg_get_active_paging();
   npg_pml4e_t *pml4e;
   npg_pdpe_t  *pdp;

   pml4e = &pg->pml4[pml4_idx(addr)];
   if(!npg_present(pml4e))
   {
      pdp = __npg_new_pdp();
      /* upper-level entry has full pvl */
      npg_set_entry(pml4e, attr|npg_dft_pvl, page_nr(pdp));
   }
   else
      pdp = (npg_pdpe_t*)page_addr(pml4e->addr);

   return &pdp[pdp_idx(addr)];
}
PUBLIC
Mem_space::Status
Vm_vmx_ept::v_insert(Mem_space::Phys_addr phys, Mem_space::Vaddr virt,
                     Mem_space::Page_order size, Mem_space::Attr page_attribs)
{
  // insert page into page table

  // XXX should modify page table using compare-and-swap

  assert (cxx::get_lsb(Mem_space::Phys_addr(phys), size) == 0);
  assert (cxx::get_lsb(Virt_addr(virt), size) == 0);

  int level;
  for (level = 0; level <= Ept::Depth; ++level)
    if (Mem_space::Page_order(Ept::page_order_for_level(level)) <= size)
      break;

  auto i = _ept->walk(virt, level, false,
                      Kmem_alloc::q_allocator(ram_quota()));

  if (EXPECT_FALSE(!i.is_valid() && i.level != level))
    return Mem_space::Insert_err_nomem;

  if (EXPECT_FALSE(i.is_valid()
                   && (i.level != level || Mem_space::Phys_addr(i.page_addr()) != phys)))
    return Mem_space::Insert_err_exists;

  if (i.is_valid())
    {
      if (EXPECT_FALSE(!i.add_attribs(page_attribs)))
        return Mem_space::Insert_warn_exists;

      return Mem_space::Insert_warn_attrib_upgrade;
    }
  else
    {
      i.create_page(phys, page_attribs);
      return Mem_space::Insert_ok;
    }

}
Example 10
static void __npg_unmap_1G(npg_pdpe_t *pdpe)
{
   npg_pde64_t *pd;
   uint32_t    i;

   debug(PG_MAP, "unmap 1G 0x%X\n", pdpe->raw);
   if(!npg_large(pdpe))
   {
      pd = (npg_pde64_t*)page_addr(pdpe->addr);

      debug(PG_MAP, "unmap 2M for each pde\n");
      for(i=0 ; i<PDE64_PER_PD ; i++)
         if(npg_present(&pd[i]))
            __npg_unmap_2M(&pd[i]);

      debug(PG_MAP, "freeing pd\n");
      pool_push_page((offset_t)pd);
   }

   npg_zero(pdpe);
}
Example 11
static void __npg_unmap_2M(npg_pde64_t *pde)
{
   npg_pte64_t  *pt;
   uint32_t     i;

   debug(PG_MAP, "unmap 2M 0x%X\n", pde->raw);
   if(!npg_large(pde))
   {
      pt = (npg_pte64_t*)page_addr(pde->addr);

      debug(PG_MAP, "clear each pte\n");
      for(i=0 ; i<PTE64_PER_PT ; i++)
         if(npg_present(&pt[i]))
            npg_zero(&pt[i]);

      debug(PG_MAP, "freeing pt\n");
      pool_push_page((offset_t)pt);
   }

   npg_zero(pde);
}
Example 12
/*
** pmode: 4MB and 4KB pages
** cr4.pse is used
*/
static int __pg_walk_pmode(cr3_reg_t *cr3, offset_t _vaddr, pg_wlk_t *wlk)
{
   pde32_t  *pd, *pde;
   pte32_t  *pt, *pte;
   uint32_t vaddr = _vaddr & 0xffffffff;

   wlk->attr = 0;

   pd = (pde32_t*)page_addr(cr3->addr);
   if(vmm_area_range(pd, PG_4K_SIZE))
   {
      debug(PG_WLK, "pd32 in vmm area\n");
      return VM_FAIL;
   }

   pde = &pd[pd32_idx(vaddr)];
   debug(PG_WLK, "pde32 @ 0x%X = 0x%x\n", (offset_t)pde, pde->raw);

   if(!pg_present(pde))
   {
      debug(PG_WLK, "pde32 not present\n");
      wlk->type  = PG_WALK_TYPE_PDE32;
      wlk->entry = (void*)pde;
      return VM_FAULT;
   }

   wlk->u = pde->lvl;
   wlk->r = 1;
   wlk->w = pde->rw;
   wlk->x = 1;

   if(__cr4.pse && pg_large(pde))
   {
      wlk->addr  = pg_4M_addr((uint32_t)pde->page.addr) + pg_4M_offset(vaddr);
      wlk->type  = PG_WALK_TYPE_PDE32;
      wlk->size  = PG_4M_SIZE;
      wlk->entry = (void*)pde;
      goto __success;
   }

   pt = (pte32_t*)page_addr(pde->addr);
   if(vmm_area_range(pt, PG_4K_SIZE))
   {
      debug(PG_WLK, "pt32 in vmm area\n");
      return VM_FAIL;
   }

   pte = &pt[pt32_idx(vaddr)];
   debug(PG_WLK, "pte32 @ 0x%X = 0x%x\n", (offset_t)pte, pte->raw);

   if(!pg_present(pte))
   {
      debug(PG_WLK, "pte32 not present\n");
      wlk->type  = PG_WALK_TYPE_PTE32;
      wlk->entry = (void*)pte;
      return VM_FAULT;
   }

   wlk->addr  = (offset_t)(pg_4K_addr((uint32_t)pte->addr) + pg_4K_offset(vaddr));
   wlk->type  = PG_WALK_TYPE_PTE32;
   wlk->size  = PG_4K_SIZE;
   wlk->entry = (void*)pte;
   wlk->u    &= pte->lvl;
   wlk->w    &= pte->rw;

__success:
   debug(PG_WLK, "pmode vaddr 0x%x -> guest paddr 0x%X\n", vaddr, wlk->addr);
   return VM_DONE;
}
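The pg_* helpers above follow the classic non-PAE 32-bit split. A minimal sketch of that decomposition, using the architectural layout from the Intel SDM rather than the project's macros:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
   uint32_t vaddr = 0x12345678;

   printf("pd %u pt %u 4K-off 0x%x 4M-off 0x%x\n",
          vaddr >> 22,            /* PD index,  bits 31:22               */
          (vaddr >> 12) & 0x3ff,  /* PT index,  bits 21:12               */
          vaddr & 0xfff,          /* offset within a 4KB page, bits 11:0 */
          vaddr & 0x3fffff);      /* offset within a 4MB page, bits 21:0 */
   return 0;
}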
Example 13
/*
** pmode+pae: 2MB and 4KB pages
** cr4.pse is used
*/
static int __pg_walk_pmode_pae(cr3_reg_t *cr3, offset_t _vaddr, pg_wlk_t *wlk)
{
   pdpe_t   *pdp, *pdpe;
   pde64_t  *pd, *pde;
   pte64_t  *pt, *pte;
   uint32_t vaddr = _vaddr & 0xffffffff;

   wlk->attr = 0;

   pdp = (pdpe_t*)pg_32B_addr((offset_t)cr3->pae.addr);
   if(vmm_area_range(pdp, PG_4K_SIZE))
   {
      debug(PG_WLK, "pdp_pae in vmm area\n");
      return VM_FAIL;
   }

   pdpe = &pdp[pdp_pae_idx(vaddr)];
   debug(PG_WLK, "pdpe_pae @ 0x%X = 0x%X\n", (offset_t)pdpe, pdpe->raw);

   if(!pg_present(pdpe))
   {
      debug(PG_WLK, "pdpe_pae not present\n");
      wlk->type  = PG_WALK_TYPE_PDPE_PAE;
      wlk->entry = (void*)pdpe;
      return VM_FAULT;
   }

   pd = (pde64_t*)page_addr(pdpe->pae.addr);
   if(vmm_area_range(pd, PG_4K_SIZE))
   {
      debug(PG_WLK, "pd64 in vmm area\n");
      return VM_FAIL;
   }

   pde = &pd[pd64_idx(vaddr)];
   debug(PG_WLK, "pde64 @ 0x%X = 0x%X\n", (offset_t)pde, pde->raw);

   if(!pg_present(pde))
   {
      debug(PG_WLK, "pde not present\n");
      wlk->type  = PG_WALK_TYPE_PDE64;
      wlk->entry = (void*)pde;
      return VM_FAULT;
   }

   wlk->u = pde->lvl;
   wlk->r = 1;
   wlk->w = pde->rw;
   wlk->x = pg64_executable(pde);

   if(__cr4.pse && pg_large(pde))
   {
      wlk->addr  = pg_2M_addr((offset_t)pde->page.addr) + pg_2M_offset(vaddr);
      wlk->type  = PG_WALK_TYPE_PDE64;
      wlk->size  = PG_2M_SIZE;
      wlk->entry = (void*)pde;
      goto __success;
   }

   pt = (pte64_t*)page_addr(pde->addr);
   if(vmm_area_range(pt, PG_4K_SIZE))
   {
      debug(PG_WLK, "pt64 in vmm area\n");
      return VM_FAIL;
   }

   pte = &pt[pt64_idx(vaddr)];
   debug(PG_WLK, "pte64 @ 0x%X = 0x%X\n", (offset_t)pte, pte->raw);

   if(!pg_present(pte))
   {
      debug(PG_WLK, "pte64 not present\n");
      wlk->type  = PG_WALK_TYPE_PTE64;
      wlk->entry = (void*)pte;
      return VM_FAULT;
   }

   wlk->addr  = pg_4K_addr((offset_t)pte->addr) + pg_4K_offset(vaddr);
   wlk->type  = PG_WALK_TYPE_PTE64;
   wlk->size  = PG_4K_SIZE;
   wlk->entry = (void*)pte;
   wlk->u    &= pte->lvl;
   wlk->w    &= pte->rw;
   wlk->x    &= pg64_executable(pte);

__success:
   debug(PG_WLK, "pae vaddr 0x%x -> guest paddr 0x%X\n", vaddr, wlk->addr);
   return VM_DONE;
}
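The PAE variant differs in two ways visible above: CR3 holds a 32-byte-aligned PDPT base (hence pg_32B_addr), and the tables hold 512 eight-byte entries. A minimal sketch of the PAE decomposition, assuming the architectural layout from the Intel SDM:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
   uint32_t vaddr = 0x12345678;

   printf("pdpt %u pd %u pt %u\n",
          vaddr >> 30,            /* PDPT index, bits 31:30 (4 entries) */
          (vaddr >> 21) & 0x1ff,  /* PD index,   bits 29:21             */
          (vaddr >> 12) & 0x1ff); /* PT index,   bits 20:12             */

   /* In PAE mode CR3 bits 31:5 hold the PDPT base, so reconstructing
    * it is a shift by 5, i.e. what pg_32B_addr() does above. */
   uint32_t cr3_pae_addr = 0x00ffee1; /* hypothetical CR3 field value */
   printf("pdpt base = 0x%x\n", cr3_pae_addr << 5);
   return 0;
}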
Example 14
static inline npg_pte64_t* __npg_get_pte_nocheck(npg_pde64_t *pde, offset_t addr)
{
   npg_pte64_t *pt = (npg_pte64_t*)page_addr(pde->addr);
   return &pt[pt64_idx(addr)];
}
Example 15
/*
** pmode: 4MB and 4KB pages
** cr4.pse is used
*/
static inline
int pg_walk_pmode(cr3_reg_t *cr3,
		  offset_t _vaddr, offset_t *_paddr,
		  size_t *psz, int chk)
{
   pde32_t  *pd, *pde;
   pte32_t  *pt, *pte;
   uint32_t paddr;
   uint32_t vaddr = _vaddr & 0xffffffff;

   pd = (pde32_t*)page_addr(cr3->addr);
   if(chk && vmm_area(pd))
   {
      debug(PG_W, "pd in vmm area\n");
      return 0;
   }

   pde = &pd[pd32_idx(vaddr)];
   debug(PG_W, "pde @ 0x%X = 0x%x\n", (offset_t)pde, pde->raw);

   if(!pg_present(pde))
   {
      debug(PG_W, "pde not present\n");
      return 0;
   }

   if(__cr4.pse && pg_large(pde))
   {
      debug(PG_W, "large page found (pde->addr 0x%x)\n", (uint32_t)pde->page.addr);
      paddr = pg_4M_addr((uint32_t)pde->page.addr) + pg_4M_offset(vaddr);
      *psz = PG_4M_SIZE;
      goto __prepare_addr;
   }

   pt = (pte32_t*)page_addr(pde->addr);
   if(chk && vmm_area(pt))
   {
      debug(PG_W, "pt in vmm area\n");
      return 0;
   }

   pte = &pt[pt32_idx(vaddr)];
   debug(PG_W, "pte @ 0x%X = 0x%x\n", (offset_t)pte, pte->raw);

   if(!pg_present(pte))
   {
      debug(PG_W, "pte not present\n");
      return 0;
   }

   paddr = pg_4K_addr((uint32_t)pte->addr) + pg_4K_offset(vaddr);
   *psz = PG_4K_SIZE;

__prepare_addr:
   if(chk && vmm_area(paddr))
   {
      debug(PG_W, "paddr 0x%x in vmm area\n", paddr);
      return 0;
   }

   debug(PG_W, "pmode vaddr 0x%x -> paddr 0x%x\n", vaddr, paddr);
   *_paddr = (offset_t)paddr;
   return 1;
}
Example 16
/*
** lmode: 1GB, 2MB and 4KB pages
** cr4.pse is ignored
** 1GB cpuid feature must be checked
*/
static int __pg_walk_lmode(cr3_reg_t *cr3, offset_t vaddr, pg_wlk_t *wlk)
{
   pml4e_t *pml4, *pml4e;
   pdpe_t  *pdp, *pdpe;
   pde64_t *pd, *pde;
   pte64_t *pt, *pte;

   wlk->attr = 0;

   pml4 = (pml4e_t*)page_addr(cr3->pml4.addr);
   if(vmm_area_range(pml4, PG_4K_SIZE))
   {
      debug(PG_WLK, "pml4 in vmm area\n");
      return VM_FAIL;
   }

   pml4e = &pml4[pml4_idx(vaddr)];
   debug(PG_WLK, "pml4e @ 0x%X = %X\n", (offset_t)pml4e, pml4e->raw);

   if(!pg_present(pml4e))
   {
      debug(PG_WLK, "pml4e not present\n");
      wlk->type  = PG_WALK_TYPE_PML4E;
      wlk->entry = (void*)pml4e;
      return VM_FAULT;
   }

   wlk->u = pml4e->lvl;
   wlk->r = 1;
   wlk->w = pml4e->rw;
   wlk->x = pg64_executable(pml4e);

   pdp = (pdpe_t*)page_addr(pml4e->addr);
   if(vmm_area_range(pdp, PG_4K_SIZE))
   {
      debug(PG_WLK, "pdp in vmm area\n");
      return VM_FAIL;
   }

   pdpe = &pdp[pdp_idx(vaddr)];
   debug(PG_WLK, "pdpe @ 0x%X = 0x%X\n", (offset_t)pdpe, pdpe->raw);

   if(!pg_present(pdpe))
   {
      debug(PG_WLK, "pdpe not present\n");
      wlk->type  = PG_WALK_TYPE_PDPE;
      wlk->entry = (void*)pdpe;
      return VM_FAULT;
   }

   wlk->u &= pdpe->lvl;
   wlk->w &= pdpe->rw;
   wlk->x &= pg64_executable(pdpe);

   if(info->vmm.cpu.skillz.pg_1G && pg_large(pdpe))
   {
      wlk->addr  = pg_1G_addr((offset_t)pdpe->page.addr) + pg_1G_offset(vaddr);
      wlk->type  = PG_WALK_TYPE_PDPE;
      wlk->size  = PG_1G_SIZE;
      wlk->entry = (void*)pdpe;
      goto __success;
   }

   pd = (pde64_t*)page_addr(pdpe->addr);
   if(vmm_area_range(pd, PG_4K_SIZE))
   {
      debug(PG_WLK, "pd64 in vmm area\n");
      return VM_FAIL;
   }

   pde = &pd[pd64_idx(vaddr)];
   debug(PG_WLK, "pde64 @ 0x%X = 0x%X\n", (offset_t)pde, pde->raw);

   if(!pg_present(pde))
   {
      debug(PG_WLK, "pde not present\n");
      wlk->type  = PG_WALK_TYPE_PDE64;
      wlk->entry = (void*)pde;
      return VM_FAULT;
   }

   wlk->u &= pde->lvl;
   wlk->w &= pde->rw;
   wlk->x &= pg64_executable(pde);

   if(pg_large(pde))
   {
      wlk->addr  = pg_2M_addr((offset_t)pde->page.addr) + pg_2M_offset(vaddr);
      wlk->type  = PG_WALK_TYPE_PDE64;
      wlk->size  = PG_2M_SIZE;
      wlk->entry = (void*)pde;
      goto __success;
   }

   pt = (pte64_t*)page_addr(pde->addr);
   if(vmm_area_range(pt, PG_4K_SIZE))
   {
      debug(PG_WLK, "pt64 in vmm area\n");
      return VM_FAIL;
   }

   pte = &pt[pt64_idx(vaddr)];
   debug(PG_WLK, "pte64 @ 0x%X = 0x%X\n", (offset_t)pte, pte->raw);

   if(!pg_present(pte))
   {
      debug(PG_WLK, "pte not present\n");
      wlk->type  = PG_WALK_TYPE_PTE64;
      wlk->entry = (void*)pte;
      return VM_FAULT;
   }

   wlk->addr  = pg_4K_addr((offset_t)pte->addr) + pg_4K_offset(vaddr);
   wlk->type  = PG_WALK_TYPE_PTE64;
   wlk->size  = PG_4K_SIZE;
   wlk->entry = (void*)pte;
   wlk->u    &= pte->lvl;
   wlk->w    &= pte->rw;
   wlk->x    &= pg64_executable(pte);

__success:
   debug(PG_WLK, "lmode vaddr 0x%X -> guest paddr 0x%X\n", vaddr, wlk->addr);
   return VM_DONE;
}
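The three page sizes this walk can land on differ only in how many low bits become the in-page offset. A minimal sketch of the offset extraction the pg_1G/2M/4K_offset helpers above correspond to, per the Intel SDM:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
   uint64_t vaddr = 0x123456789abcULL;

   printf("1G-off 0x%llx 2M-off 0x%llx 4K-off 0x%llx\n",
          (unsigned long long)(vaddr & 0x3fffffffULL), /* bits 29:0 */
          (unsigned long long)(vaddr & 0x1fffffULL),   /* bits 20:0 */
          (unsigned long long)(vaddr & 0xfffULL));     /* bits 11:0 */
   return 0;
}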
Example 17
/*
** pmode+pae: 2MB and 4KB pages
** cr4.pse is used
*/
static inline
int pg_walk_pmode_pae(cr3_reg_t *cr3,
		      offset_t _vaddr, offset_t *paddr,
		      size_t *psz, int chk)
{
   pdpe_t   *pdp, *pdpe;
   pde64_t  *pd, *pde;
   pte64_t  *pt, *pte;
   uint32_t vaddr = _vaddr & 0xffffffff;

   pdp = (pdpe_t*)pg_32B_addr((offset_t)cr3->pae.addr);
   if(chk && vmm_area(pdp))
   {
      debug(PG_W, "pdp in vmm area\n");
      return 0;
   }

   pdpe = &pdp[pdp_pae_idx(vaddr)];
   debug(PG_W, "pdpe @ 0x%X = 0x%X\n", (offset_t)pdpe, pdpe->raw);

   if(!pg_present(pdpe))
   {
      debug(PG_W, "pdpe not present\n");
      return 0;
   }

   pd = (pde64_t*)page_addr(pdpe->pae.addr);
   if(chk && vmm_area(pd))
   {
      debug(PG_W, "pd in vmm area\n");
      return 0;
   }

   pde = &pd[pd64_idx(vaddr)];
   debug(PG_W, "pde @ 0x%X = 0x%X\n", (offset_t)pde, pde->raw);

   if(!pg_present(pde))
   {
      debug(PG_W, "pde not present\n");
      return 0;
   }

   if(__cr4.pse && pg_large(pde))
   {
      *paddr = pg_2M_addr((offset_t)pde->page.addr) + pg_2M_offset(vaddr);
      *psz = PG_2M_SIZE;
      goto __prepare_addr;
   }

   pt = (pte64_t*)page_addr(pde->addr);
   if(chk && vmm_area(pt))
   {
      debug(PG_W, "pt in vmm area\n");
      return 0;
   }

   pte = &pt[pt64_idx(vaddr)];
   debug(PG_W, "pte @ 0x%X = 0x%X\n", (offset_t)pte, pte->raw);

   if(!pg_present(pte))
   {
      debug(PG_W, "pte not present\n");
      return 0;
   }

   *paddr = pg_4K_addr((offset_t)pte->addr) + pg_4K_offset(vaddr);
   *psz = PG_4K_SIZE;

__prepare_addr:
   if(chk && vmm_area(*paddr))
   {
      debug(PG_W, "paddr 0x%x in vmm area\n", *paddr);
      return 0;
   }

   debug(PG_W, "pae vaddr 0x%x -> paddr 0x%x\n", vaddr, *paddr);
   return 1;
}
Example 18
/*
** Low level entry retrieval
*/
static inline npg_pdpe_t* __npg_get_pdpe_nocheck(npg_pml4e_t *pml4e, offset_t addr)
{
   npg_pdpe_t *pdp = (npg_pdpe_t*)page_addr(pml4e->addr);
   return &pdp[pdp_idx(addr)];
}
Example 19
/*
** lmode: 1GB, 2MB and 4KB pages
** cr4.pse is ignored
** 1GB cpuid feature must be checked
*/
static inline
int pg_walk_lmode(cr3_reg_t *cr3,
		  offset_t vaddr, offset_t *paddr,
		  size_t *psz, int chk)
{
   pml4e_t *pml4, *pml4e;
   pdpe_t  *pdp, *pdpe;
   pde64_t *pd, *pde;
   pte64_t *pt, *pte;

   pml4 = (pml4e_t*)page_addr(cr3->pml4.addr);
   if(chk && vmm_area(pml4))
   {
      debug(PG_W, "pml4 in vmm area\n");
      return 0;
   }

   pml4e = &pml4[pml4_idx(vaddr)];
   debug(PG_W, "pml4e @ 0x%X = %X\n", (offset_t)pml4e, pml4e->raw);

   if(!pg_present(pml4e))
   {
      debug(PG_W, "pml4e not present\n");
      return 0;
   }

   pdp = (pdpe_t*)page_addr(pml4e->addr);
   if(chk && vmm_area(pdp))
   {
      debug(PG_W, "pdp in vmm area\n");
      return 0;
   }

   pdpe = &pdp[pdp_idx(vaddr)];
   debug(PG_W, "pdpe @ 0x%X = 0x%X\n", (offset_t)pdpe, pdpe->raw);

   if(!pg_present(pdpe))
   {
      debug(PG_W, "pdpe not present\n");
      return 0;
   }

   if(info->vmm.cpu.skillz.pg_1G && pg_large(pdpe))
   {
      *paddr = pg_1G_addr((offset_t)pdpe->page.addr) + pg_1G_offset(vaddr);
      *psz = PG_1G_SIZE;
      goto __prepare_addr;
   }

   pd = (pde64_t*)page_addr(pdpe->addr);
   if(chk && vmm_area(pd))
   {
      debug(PG_W, "pd in vmm area\n");
      return 0;
   }

   pde = &pd[pd64_idx(vaddr)];
   debug(PG_W, "pde @ 0x%X = 0x%X\n", (offset_t)pde, pde->raw);

   if(!pg_present(pde))
   {
      debug(PG_W, "pde not present\n");
      return 0;
   }

   if(pg_large(pde))
   {
      *paddr = pg_2M_addr((offset_t)pde->page.addr) + pg_2M_offset(vaddr);
      *psz = PG_2M_SIZE;
      goto __prepare_addr;
   }

   pt = (pte64_t*)page_addr(pde->addr);
   if(chk && vmm_area(pt))
   {
      debug(PG_W, "pt in vmm area\n");
      return 0;
   }

   pte = &pt[pt64_idx(vaddr)];
   debug(PG_W, "pte @ 0x%X = 0x%X\n", (offset_t)pte, pte->raw);

   if(!pg_present(pte))
   {
      debug(PG_W, "pte not present\n");
      return 0;
   }

   *paddr = pg_4K_addr((offset_t)pte->addr) + pg_4K_offset(vaddr);
   *psz = PG_4K_SIZE;

__prepare_addr:
   if(chk && vmm_area(*paddr))
   {
      debug(PG_W, "paddr 0x%x in vmm area\n", *paddr);
      return 0;
   }

   debug(PG_W, "lmode vaddr 0x%X -> paddr 0x%X\n", vaddr, *paddr);
   return 1;
}
Example 20
static inline npg_pde64_t* __npg_get_pde_nocheck(npg_pdpe_t *pdpe, offset_t addr)
{
   npg_pde64_t *pd = (npg_pde64_t*)page_addr(pdpe->addr);
   return &pd[pd64_idx(addr)];
}