Example No. 1
void
Generic_obj_space<SPACE>::caps_free()
{
  Mem_space *ms = mem_space();
  if (EXPECT_FALSE(!ms || !ms->dir()))
    return;

  Mapped_allocator *a = Mapped_allocator::allocator();
  for (unsigned long i = 0; i < map_max_address().value();
       i += Caps_per_page)
    {
      Entry *c = get_cap(i);
      if (!c)
	continue;

      Address cp = Address(ms->virt_to_phys(Address(c)));
      assert_kdb (cp != ~0UL);
      void *cv = (void*)Mem_layout::phys_to_pmem(cp);
      remove_dbg_info(cv);

      a->q_unaligned_free(ram_quota(), Config::PAGE_SIZE, cv);
    }
#if defined (CONFIG_ARM)
  ms->dir()->free_page_tables((void*)Mem_layout::Caps_start, (void*)Mem_layout::Caps_end);
#else
  ms->dir()->Pdir::alloc_cast<Mem_space_q_alloc>()
    ->destroy(Virt_addr(Mem_layout::Caps_start),
              Virt_addr(Mem_layout::Caps_end), Pdir::Depth - 1,
              Mem_space_q_alloc(ram_quota(), Mapped_allocator::allocator()));
#endif
}
Example No. 2
void
Obj_space_virt<SPACE>::caps_free()
{
  Mem_space *ms = SPACE::mem_space(this);
  if (EXPECT_FALSE(!ms || !ms->dir()))
    return;

  Kmem_alloc *a = Kmem_alloc::allocator();
  for (Cap_index i = Cap_index(0); i < obj_map_max_address();
       i += Cap_diff(Obj::Caps_per_page))
    {
      Entry *c = get_cap(i);
      if (!c)
	continue;

      Address cp = Address(ms->virt_to_phys(Address(c)));
      assert_kdb (cp != ~0UL);
      void *cv = (void*)Mem_layout::phys_to_pmem(cp);
      Obj::remove_cap_page_dbg_info(cv);

      a->q_unaligned_free(SPACE::ram_quota(this), Config::PAGE_SIZE, cv);
    }
  ms->dir()->destroy(Virt_addr(Mem_layout::Caps_start),
                     Virt_addr(Mem_layout::Caps_end - 1),
                     Pdir::Super_level,
                     Pdir::Depth,
                     Kmem_alloc::q_allocator(SPACE::ram_quota(this)));
}
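Both caps_free() variants walk the capability table one page at a time. A minimal standalone sketch of that stride, assuming 4 KiB pages and word-sized entries (both values are assumptions; the real ones come from Config::PAGE_SIZE and sizeof(Entry)):

#include <cstdio>

int main()
{
  const unsigned long Page_size     = 4096;           // assumed Config::PAGE_SIZE
  const unsigned long Entry_size    = sizeof(void *); // assumed sizeof(Entry)
  const unsigned long Caps_per_page = Page_size / Entry_size;

  // caps_free() inspects one Entry per capability page, so the loop
  // index advances by Caps_per_page slots per iteration:
  for (unsigned long i = 0; i < 4 * Caps_per_page; i += Caps_per_page)
    printf("capability page starts at slot %lu\n", i);
  return 0;
}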
Example No. 3
PUBLIC
Vm_vmx_ept::~Vm_vmx_ept()
{
  if (_ept)
    {
      _ept->destroy(Virt_addr(0UL), Virt_addr(~0UL), 0, Ept::Depth,
                    Kmem_alloc::q_allocator(ram_quota()));
      Kmem_alloc::allocator()->q_free(ram_quota(), Config::PAGE_SHIFT, _ept);
      _ept = 0;
      _ept_phys = 0;
    }
}
Example No. 4
PUBLIC
L4_fpage::Rights
Vm_vmx_ept::v_delete(Mem_space::Vaddr virt, Mem_space::Page_order size,
                     L4_fpage::Rights page_attribs)
{
  (void)size;
  assert (cxx::get_lsb(Virt_addr(virt), size) == 0);

  auto i = _ept->walk(virt);

  if (EXPECT_FALSE (! i.is_valid()))
    return L4_fpage::Rights(0);

  L4_fpage::Rights ret = i.access_flags();

  if (! (page_attribs & L4_fpage::Rights::R()))
    {
      // downgrade PDE (superpage) rights
      i.del_rights(page_attribs);
    }
  else
    {
      // delete PDE (superpage)
      i.clear();
    }

  return ret;
}
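The branch above encodes a convention: revoking the read right removes the mapping altogether, while anything else is a plain rights downgrade. A minimal sketch with a plain bitmask standing in for L4_fpage::Rights (the flag values are made up):

#include <cstdio>

enum Rights : unsigned { R = 4, W = 2, X = 1 };

int main()
{
  unsigned entry_rights = R | W | X;
  unsigned to_delete    = W;           // revoke write access only

  if (!(to_delete & R))
    entry_rights &= ~to_delete;        // downgrade: drop the listed rights
  else
    entry_rights = 0;                  // dropping R unmaps the page entirely

  printf("remaining rights: 0x%x\n", entry_rights);
  return 0;
}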
Example No. 5
PRIVATE
Address
Device_map::map(Address phys, bool /*cache*/)
{
  unsigned idx = lookup_idx(phys);
  if (idx != ~0U)
    return (Virt_base + idx * Config::SUPERPAGE_SIZE)
           | (phys & ~(~0UL << Config::SUPERPAGE_SHIFT));

  Address p = phys & (~0UL << Config::SUPERPAGE_SHIFT);
  Kmem_alloc *const alloc = Kmem_alloc::allocator();
  for (unsigned i = 0; i < Max; ++i)
    if (_map[i] == ~0UL)
      {
	Kmem::kdir->map(p,
                        Virt_addr(Virt_base + (i * Config::SUPERPAGE_SIZE)),
                        Virt_size(Config::SUPERPAGE_SIZE),
                        Pt_entry::Dirty | Pt_entry::Writable
                        | Pt_entry::Referenced,
                        Pdir::super_level(), pdir_alloc(alloc));
	_map[i] = p;

	return (Virt_base + (i * Config::SUPERPAGE_SIZE))
	       | (phys & ~(~0UL << Config::SUPERPAGE_SHIFT));
      }

  return ~0UL;
}
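A minimal standalone sketch of the address composition above, with made-up base and shift values (the real ones come from Mem_layout and Config): the returned address is the chosen slot's superpage window plus the offset of phys within its superpage.

#include <cstdio>

int main()
{
  const unsigned Superpage_shift = 22;                    // assumed 4 MiB superpages
  const unsigned long Superpage_size = 1UL << Superpage_shift;
  const unsigned long Virt_base = 0xbf000000;             // made-up window base

  unsigned long phys = 0xfee00300;                        // made-up device address
  unsigned long page = phys & (~0UL << Superpage_shift);  // superpage to map
  unsigned long offs = phys & ~(~0UL << Superpage_shift); // offset inside it
  unsigned idx = 0;                                       // first free slot, say

  printf("map 0x%lx -> slot %u, return 0x%lx\n",
         page, idx, Virt_base + idx * Superpage_size + offs);
  return 0;
}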
Example No. 6
IMPLEMENT
void
Task::map_utcb_ptr_page()
{
  // the Mem_space::Status result is deliberately ignored here
  static_cast<Mem_space*>(this)->v_insert(
      Mem_space::Phys_addr(Mem_layout::Utcb_ptr_frame),
      Virt_addr(Mem_layout::Utcb_ptr_page_user),
      Mem_space::Page_order(Config::PAGE_SHIFT),
      Mem_space::Attr(Page::Rights::URW()));
}
Example No. 7
//---------------------------------------------------------------------------
IMPLEMENTATION[!ppc32]:

PUBLIC
Address
Pdir::virt_to_phys(Address virt) const
{
  Iter i = walk(Virt_addr(virt));
  if (!i.e->valid())
    return ~0;

  return i.e->addr() | (virt & ~(~0UL << i.shift()));
}
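The expression virt & ~(~0UL << i.shift()) keeps exactly the bits below the mapping's page size, so the same code handles 4 KiB pages and superpages. A standalone sketch with made-up addresses:

#include <cstdio>

int main()
{
  unsigned long virt = 0xc0123456;
  unsigned shifts[] = { 12, 22 };     // 4 KiB page vs. 4 MiB superpage

  for (unsigned shift : shifts)
    {
      unsigned long offset = virt & ~(~0UL << shift); // bits below page size
      unsigned long frame  = 0x10000000UL;            // made-up, aligned phys frame
      printf("shift %2u: phys = 0x%lx\n", shift, frame | offset);
    }
  return 0;
}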
Example No. 8
PUBLIC
void
Device_map::unmap(void const *phys)
{
  unsigned idx = lookup_idx((Address)phys);
  if (idx == ~0U)
    return;

  Address v = Virt_base + (idx * Config::SUPERPAGE_SIZE);

  Kmem::kdir->unmap(Virt_addr(v), Virt_size(Config::SUPERPAGE_SIZE), -1);
}
Example No. 9
//---------------------------------------------------------------------------
IMPLEMENTATION[ppc32]:

PUBLIC
Address
Pdir::virt_to_phys(Address virt) const
{
  Iter i = walk(Virt_addr(virt));

  if (!i.e->valid())
    return ~0UL;

  Address phys;
  Pte_htab::pte_lookup(i.e, &phys);
  return phys | (virt & ~(~0UL << i.shift()));
}
Example No. 10
//---------------------------------------------------------------------------
IMPLEMENTATION[ppc32]:

PUBLIC
Address
Pdir::virt_to_phys(Address virt) const
{
  auto i = walk(Virt_addr(virt));

  if (!i.is_valid())
    return Invalid_address;

#ifdef FIX_THIS
  Address phys;
  Pte_htab::pte_lookup(i.e, &phys);
  return phys | (virt & ~(~0UL << i.page_order()));
#else
  // TODO: the hash-table lookup has not been ported to the new page-table
  // walker yet, so valid mappings are reported as unmapped as well.
  return Invalid_address;
#endif
}
Example No. 11
//---------------------------------------------------------------------------
IMPLEMENTATION [arm]:

#include "mem_unit.h"
#include "kmem_space.h"
#include "paging.h"
#include <cassert>

IMPLEMENT inline
Mword Kmem::is_kmem_page_fault(Mword pfa, Mword)
{
  return in_kernel(pfa);
}

IMPLEMENT inline
Mword Kmem::is_io_bitmap_page_fault(Mword)
{
  return 0;
}

PUBLIC static
Address
Kmem::mmio_remap(Address phys)
{
  static Address ndev = 0;
  Address v = phys_to_pmem(phys);
  if (v != ~0UL)
    return v;

  Address dm = Mem_layout::Registers_map_start + ndev;
  assert(dm < Mem_layout::Registers_map_end);

  ndev += Config::SUPERPAGE_SIZE;

  auto m = Kmem_space::kdir()->walk(Virt_addr(dm), Pte_ptr::Super_level);
  assert (!m.is_valid());
  assert (m.page_order() == Config::SUPERPAGE_SHIFT);
  Address phys_page = cxx::mask_lsb(phys, Config::SUPERPAGE_SHIFT);
  m.create_page(Phys_mem_addr(phys_page), Page::Attr(Page::Rights::RWX(), Page::Type::Uncached(),
                Page::Kern::Global()));

  m.write_back_if(true, Mem_unit::Asid_kernel);
  add_pmem(phys_page, dm, Config::SUPERPAGE_SIZE);

  return phys_to_pmem(phys);
}
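A hypothetical usage sketch (the physical address is made up): remapping the same device page twice yields the same virtual address, because the first call publishes the mapping via add_pmem() and the second call takes the phys_to_pmem() fast path.

  Address uart = Kmem::mmio_remap(0x1c090000); // made-up device base
  Address same = Kmem::mmio_remap(0x1c090000);
  assert(uart == same);                        // second call is a lookup only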
Example No. 12
IMPLEMENT
void*
Vmem_alloc::page_alloc (void *address, Zero_fill zf, unsigned mode)
{
  void *vpage = 0;
  Address page;

  vpage = Mapped_allocator::allocator()->alloc(Config::PAGE_SHIFT);

  if (EXPECT_FALSE(!vpage))
    return 0;

  // insert page into master page table
  Pdir::Iter e = Kmem::kdir->walk(Virt_addr(address), 100,
                                  Mapped_allocator::allocator());
  if (EXPECT_FALSE(e.e->valid()))
    {
      kdb_ke("page_alloc: address already mapped");
      goto error;
    }

  if (e.shift() != Config::PAGE_SHIFT)
    goto error;

  if (zf == ZERO_FILL)
    memset(vpage, 0, Config::PAGE_SIZE);

  page = Mem_layout::pmem_to_phys((Address)vpage);

  *e.e = page | Pt_entry::Writable | Pt_entry::Dirty
    | Pt_entry::Valid | Pt_entry::Referenced | Pt_entry::global();
  page_map (address, 0, zf, page);

  if (mode & User)
    e.e->add_attr(Pt_entry::User);

  return address;

error:
  Mapped_allocator::allocator()->free(Config::PAGE_SHIFT, vpage); // one page
  return 0;
}
Example No. 13
// Establish a 4k-mapping
PUBLIC static
void
Kmem::map_phys_page(Address phys, Address virt,
                    bool cached, bool global, Address *offs=0)
{
  Pdir::Iter i = kdir->walk(Virt_addr(virt), 100, pdir_alloc(Kmem_alloc::allocator()));
  Pte_base *e = i.e;
  Mword pte = phys & Config::PAGE_MASK;

  assert(i.shift() == Config::PAGE_SHIFT);

  *e = pte | Pt_entry::Valid | Pt_entry::Writable
	   | Pt_entry::Referenced | Pt_entry::Dirty
	   | (cached ? 0 : (Pt_entry::Write_through | Pt_entry::Noncacheable))
	   | (global ? Pt_entry::global() : 0);
  Mem_unit::tlb_flush(virt);

  if (offs)
    *offs = phys - pte;
}
Example No. 14
PUBLIC
Mem_space::Status
Vm_vmx_ept::v_insert(Mem_space::Phys_addr phys, Mem_space::Vaddr virt,
                     Mem_space::Page_order size, Mem_space::Attr page_attribs)
{
  // insert page into page table

  // XXX should modify page table using compare-and-swap

  assert (cxx::get_lsb(Mem_space::Phys_addr(phys), size) == 0);
  assert (cxx::get_lsb(Virt_addr(virt), size) == 0);

  int level;
  for (level = 0; level <= Ept::Depth; ++level)
    if (Mem_space::Page_order(Ept::page_order_for_level(level)) <= size)
      break;

  auto i = _ept->walk(virt, level, false,
                      Kmem_alloc::q_allocator(ram_quota()));

  if (EXPECT_FALSE(!i.is_valid() && i.level != level))
    return Mem_space::Insert_err_nomem;

  if (EXPECT_FALSE(i.is_valid()
                   && (i.level != level || Mem_space::Phys_addr(i.page_addr()) != phys)))
    return Mem_space::Insert_err_exists;

  if (i.is_valid())
    {
      if (EXPECT_FALSE(!i.add_attribs(page_attribs)))
        return Mem_space::Insert_warn_exists;

      return Mem_space::Insert_warn_attrib_upgrade;
    }
  else
    {
      i.create_page(phys, page_attribs);
      return Mem_space::Insert_ok;
    }

}
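The level-selection loop above picks the shallowest page-table level whose pages are no larger than the requested order. A standalone sketch with made-up page orders for a 4-level table (the real values come from Ept::page_order_for_level()):

#include <cstdio>

int main()
{
  const int Depth = 3;
  const unsigned order_for_level[Depth + 1] = { 39, 30, 21, 12 }; // root..leaf
  unsigned size = 21; // requested order: a 2 MiB mapping

  int level;
  for (level = 0; level <= Depth; ++level)
    if (order_for_level[level] <= size)
      break; // first level whose pages fit into the request

  printf("insert at level %d (page order %u)\n", level, order_for_level[level]);
  return 0;
}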
Example No. 15
void *
Vmem_alloc::page_unmap (void *page)
{
  Address phys = Kmem::virt_to_phys(page);

  if (phys == (Address) -1)
    return 0;
  
  Address va = reinterpret_cast<Address>(page);
  void *res = (void*)Mem_layout::phys_to_pmem(phys);

  if (va < Mem_layout::Vmem_end)
    {
      // clean out page-table entry
      *(Kmem::kdir->walk(Virt_addr(va)).e) = 0;
      page_unmap (page, 0);
      Mem_unit::tlb_flush(va);
    }

  return res;
}
Example No. 16
/* inline NEEDS["kmem_alloc.h", <cstring>, "ram_quota.h",
                Obj_space_virt::cap_virt] */
typename Obj_space_virt<SPACE>::Entry *
Obj_space_virt<SPACE>::caps_alloc(Cap_index virt)
{
  Address cv = (Address)cap_virt(virt);
  void *mem = Kmem_alloc::allocator()->q_unaligned_alloc(SPACE::ram_quota(this), Config::PAGE_SIZE);

  if (!mem)
    return 0;

  Obj::add_cap_page_dbg_info(mem, SPACE::get_space(this), cxx::int_value<Cap_index>(virt));

  Mem::memset_mwords(mem, 0, Config::PAGE_SIZE / sizeof(Mword));

  Mem_space::Status s;
  s = SPACE::mem_space(this)->v_insert(
      Mem_space::Phys_addr(Mem_space::kernel_space()->virt_to_phys((Address)mem)),
      cxx::mask_lsb(Virt_addr(cv), Mem_space::Page_order(Config::PAGE_SHIFT)),
      Mem_space::Page_order(Config::PAGE_SHIFT),
      Mem_space::Attr(L4_fpage::Rights::RW()));
      //| Mem_space::Page_referenced | Mem_space::Page_dirty);

  switch (s)
    {
    case Mem_space::Insert_ok:
      break;
    case Mem_space::Insert_warn_exists:
    case Mem_space::Insert_warn_attrib_upgrade:
      assert (false);
      break;
    case Mem_space::Insert_err_exists:
    case Mem_space::Insert_err_nomem:
      Kmem_alloc::allocator()->q_unaligned_free(SPACE::ram_quota(this),
          Config::PAGE_SIZE, mem);
      return 0;
    }

  unsigned long cap = (cv & (Config::PAGE_SIZE - 1)) | (unsigned long)mem;

  return reinterpret_cast<Entry*>(cap);
}
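The final composition relies on the cap page being page-aligned: the low bits of the slot's virtual address select the Entry within the page, while mem supplies the kernel-visible page address. A standalone sketch with made-up values:

#include <cassert>
#include <cstdio>

int main()
{
  const unsigned long Page_size = 4096;  // assumed Config::PAGE_SIZE
  unsigned long cv  = 0xeac00120;        // made-up user-visible slot address
  unsigned long mem = 0xf8042000;        // made-up page-aligned kernel page

  assert((mem & (Page_size - 1)) == 0);  // allocation must be page aligned
  unsigned long cap = (cv & (Page_size - 1)) | mem;
  printf("Entry lives at 0x%lx\n", cap); // 0xf8042120
  return 0;
}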
Example No. 17
void
Vmem_alloc::page_free (void *page)
{
  Address phys = Kmem::virt_to_phys(page);

  if (phys == (Address) -1)
    return;

  // convert back to virt (do not use "page") to get canonic mapping
  Mapped_allocator::allocator()->free(Config::PAGE_SHIFT, // one page
      Kmem::phys_to_virt(phys));

  Address va = reinterpret_cast<Address>(page);

  if (va < Mem_layout::Vmem_end)
    {
      // clean out page-table entry
      Kmem::kdir->walk(Virt_addr(va)).e->clear();
      page_unmap (page, 0);
      Mem_unit::tlb_flush(va);
    }
}
Example No. 18
IMPLEMENTATION [ia32-abs_syscalls]:

#include <cstdio>
#include <cstring>
#include "config.h"
#include "cpu.h"
#include "kernel_task.h"
#include "mem_layout.h"
#include "panic.h"
#include "paging.h"
#include "space.h"
#include "types.h"
#include "vmem_alloc.h"

enum
{
  Offs_invoke            = 0x000,
  Offs_se_invoke         = 0x000,
  Offs_kip_invoke        = 0x800,
  Offs_kip_se_invoke     = 0x800,
  Offs_debugger          = 0x200,
  Offs_kip_debugger      = 0x900,
};


#define INV_SYSCALL(sysc) \
  *reinterpret_cast<Unsigned16*>(Mem_layout::Syscalls + Offs_##sysc) = 0x0b0f

#define SYSCALL_SYMS(sysc) \
extern char sys_call_##sysc, sys_call_##sysc##_end

#define COPY_SYSCALL(sysc) do { \
memcpy( (char*)Mem_layout::Syscalls + Offs_##sysc, &sys_call_##sysc, \
        &sys_call_##sysc##_end - &sys_call_##sysc ); \
memcpy( (char*)Kip::k() + Offs_kip_##sysc, &sys_call_##sysc, \
        &sys_call_##sysc##_end - &sys_call_##sysc ); } while (0)
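For illustration, COPY_SYSCALL(se_invoke) expands (modulo whitespace) to:

do {
  memcpy((char*)Mem_layout::Syscalls + Offs_se_invoke, &sys_call_se_invoke,
         &sys_call_se_invoke_end - &sys_call_se_invoke);
  memcpy((char*)Kip::k() + Offs_kip_se_invoke, &sys_call_se_invoke,
         &sys_call_se_invoke_end - &sys_call_se_invoke);
} while (0);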


IMPLEMENT 
void
Sys_call_page::init()
{
  SYSCALL_SYMS(invoke);
  SYSCALL_SYMS(se_invoke);
  SYSCALL_SYMS(debugger);

  if (!Vmem_alloc::page_alloc((void*)Mem_layout::Syscalls, 
	Vmem_alloc::ZERO_FILL, Vmem_alloc::User))
    panic("FIASCO: can't allocate system-call page.\n");

  printf ("Absolute KIP Syscalls using: %s\n",
          Cpu::have_sysenter() ? "Sysenter" : "int 0x30");

  Kip::k()->kip_sys_calls       = 2;

  if (Cpu::have_sysenter())
    COPY_SYSCALL(se_invoke);
  else
    COPY_SYSCALL(invoke);

  COPY_SYSCALL(debugger);

  Kernel_task::kernel_task()->set_attributes(
      Virt_addr(Mem_layout::Syscalls),
      Page::Attr(Page::Rights::UR(), Page::Type::Normal(),
                 Page::Kern::Global()));
}
Example No. 19
PUBLIC static FIASCO_INIT
void
Kmem::init_mmu()
{
  dev_map.init();
  Kmem_alloc *const alloc = Kmem_alloc::allocator();

  kdir = (Pdir*)alloc->alloc(Config::PAGE_SHIFT);
  memset (kdir, 0, Config::PAGE_SIZE);

  unsigned long cpu_features = Cpu::get_features();
  bool superpages = cpu_features & FEAT_PSE;

  printf("Superpages: %s\n", superpages?"yes":"no");

  Pdir::have_superpages(superpages);
  if (superpages)
    Cpu::set_cr4(Cpu::get_cr4() | CR4_PSE);

  if (cpu_features & FEAT_PGE)
    {
      Pt_entry::enable_global();
      Cpu::set_cr4 (Cpu::get_cr4() | CR4_PGE);
    }

  // set up the kernel mapping for physical memory.  mark all pages as
  // referenced and modified (so when touching the respective pages
  // later, we save the CPU overhead of marking the pd/pt entries like
  // this)

  // we also set up a one-to-one virt-to-phys mapping for two reasons:
  // (1) so that we switch to the new page table early and re-use the
  //     segment descriptors set up by boot_cpu.cc.  (we'll set up our
  //     own descriptors later.) we only need the first 4MB for that.
  // (2) a one-to-one phys-to-virt mapping in the kernel's page directory
  //     sometimes comes in handy (mostly useful for debugging)

  // first 4MB page
  kdir->map(0, Virt_addr(0), Virt_size(4 << 20),
      Pt_entry::Dirty | Pt_entry::Writable | Pt_entry::Referenced,
      Pdir::super_level(), pdir_alloc(alloc));


  kdir->map(Mem_layout::Kernel_image_phys,
            Virt_addr(Mem_layout::Kernel_image),
            Virt_size(Config::SUPERPAGE_SIZE),
            Pt_entry::Dirty | Pt_entry::Writable | Pt_entry::Referenced
            | Pt_entry::global(), Pdir::super_level(), pdir_alloc(alloc));

  if (!Mem_layout::Adap_in_kernel_image)
    kdir->map(Mem_layout::Adap_image_phys,
              Virt_addr(Mem_layout::Adap_image),
              Virt_size(Config::SUPERPAGE_SIZE),
              Pt_entry::Dirty | Pt_entry::Writable | Pt_entry::Referenced
              | Pt_entry::global(), Pdir::super_level(), pdir_alloc(alloc));

  // map the last 64MB of physical memory as kernel memory
  kdir->map(Mem_layout::pmem_to_phys(Mem_layout::Physmem),
            Virt_addr(Mem_layout::Physmem), Virt_size(Mem_layout::pmem_size),
            Pt_entry::Writable | Pt_entry::Referenced | Pt_entry::global(),
            Pdir::super_level(), pdir_alloc(alloc));

  // The service page-directory entry points to a universally usable
  // page table which is currently used for the Local APIC and the
  // jdb adapter page.
  assert((Mem_layout::Service_page & ~Config::SUPERPAGE_MASK) == 0);

  Pdir::Iter pt = kdir->walk(Virt_addr(Mem_layout::Service_page), 100, pdir_alloc(alloc));

  // kernel mode should acknowledge write-protected page table entries
  Cpu::set_cr0(Cpu::get_cr0() | CR0_WP);

  // now switch to our new page table
  Cpu::set_pdbr(Mem_layout::pmem_to_phys(kdir));

  assert((Mem_layout::Io_bitmap & ~Config::SUPERPAGE_MASK) == 0);

  long cpu_page_size = 0x10 + Config::Max_num_cpus * (sizeof(Tss) + 256);

  if (cpu_page_size < Config::PAGE_SIZE)
    cpu_page_size = Config::PAGE_SIZE;

  pmem_cpu_page = Mem_layout::pmem_to_phys(alloc->unaligned_alloc(cpu_page_size));

  printf("Kmem:: cpu page at %lx (%ldBytes)\n", pmem_cpu_page, cpu_page_size);

  if (superpages
      && Config::SUPERPAGE_SIZE - (pmem_cpu_page & ~Config::SUPERPAGE_MASK) < 0x10000)
    {
      // can map as 4MB page because the cpu_page will land within a
      // 16-bit range from io_bitmap
      *(kdir->walk(Virt_addr(Mem_layout::Io_bitmap - Config::SUPERPAGE_SIZE),
                   Pdir::Super_level, pdir_alloc(alloc)).e)
	= (pmem_cpu_page & Config::SUPERPAGE_MASK)
          | Pt_entry::Pse_bit
          | Pt_entry::Writable | Pt_entry::Referenced
          | Pt_entry::Dirty | Pt_entry::global() | Pt_entry::Valid;

      cpu_page_vm = (pmem_cpu_page & ~Config::SUPERPAGE_MASK)
                    + (Mem_layout::Io_bitmap - Config::SUPERPAGE_SIZE);
    }
  else
    {
      unsigned i;
      for (i = 0; cpu_page_size > 0; ++i, cpu_page_size -= Config::PAGE_SIZE)
	{
	  pt = kdir->walk(Virt_addr(Mem_layout::Io_bitmap - Config::PAGE_SIZE * (i+1)),
	                  100, pdir_alloc(alloc));

	  *pt.e = (pmem_cpu_page + i*Config::PAGE_SIZE)
	          | Pt_entry::Valid | Pt_entry::Writable
	          | Pt_entry::Referenced | Pt_entry::Dirty
	          | Pt_entry::global();
	}

      cpu_page_vm = Mem_layout::Io_bitmap - Config::PAGE_SIZE * i;
    }

  // The I/O bitmap must be followed by one byte containing 0xff; if
  // this byte is missing, accessing the last port causes page faults
  // (or general-protection faults), at least on a Pentium 133.
  //
  // Therefore we write 0xff into the first byte of the cpu_page and
  // map this page behind every I/O bitmap.
  io_bitmap_delimiter = reinterpret_cast<Unsigned8 *>(cpu_page_vm);

  cpu_page_vm += 0x10;

  // did we really get the first byte?
  assert((reinterpret_cast<Address>(io_bitmap_delimiter)
          & ~Config::PAGE_MASK) == 0);
  *io_bitmap_delimiter = 0xff;
}