Example #1
Address X86Memory::mapVirtual(Address paddr, Address vaddr, ulong prot)
{
    /* Virtual address specified? */
    if (vaddr == ZERO)
        vaddr = findFree(PAGETABFROM, PAGEDIRADDR);

    /* Point to the correct page table. */
    myPageTab = PAGETABADDR(vaddr);
    
    /* Do we have the page table in memory? */
    if (!(myPageDir[DIRENTRY(vaddr)] & PAGE_PRESENT)) {
        /* Then first allocate new page table. */
        Address newPageTab  = memory->allocatePhysical(PAGESIZE);
        newPageTab |= PAGE_PRESENT | PAGE_RW | prot;

        /* Map the new page table into memory. */
        myPageDir[DIRENTRY(vaddr)] = newPageTab;
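        /* Invalidate any stale TLB entry so the memset below reaches the new frame. */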
        tlb_flush(myPageTab);

        /* Zero the new page table. */
        memset(myPageTab, 0, PAGESIZE);
    }
    
    /* Map physical to virtual address. */
    myPageTab[TABENTRY(vaddr)] = (paddr & PAGEMASK) | prot;
    tlb_flush(vaddr);

    /* Success. */
    return vaddr;
}
Example #2
MemoryContext::Result ARMFirstTable::map(Address virt,
                                         Address phys,
                                         Memory::Access access,
                                         SplitAllocator *alloc)
{
    ARMSecondTable *table = getSecondTable(virt, alloc);
    Address addr;

    // Input addresses must be aligned on pagesize boundary
    if ((phys & ~PAGEMASK) || (virt & ~PAGEMASK))
        return MemoryContext::InvalidAddress;

    // Check if the page table is present.
    if (!table)
    {
        // Reject if already mapped as a (super)section
        if (m_tables[ DIRENTRY(virt) ] & PAGE1_SECTION)
            return MemoryContext::AlreadyExists;

        // TODO: Wasting some of the 4KB page, because a page table is only 1KB for ARM.
        // Allocate a new page table
        if (alloc->allocateLow(sizeof(ARMSecondTable), &addr) != Allocator::Success)
            return MemoryContext::OutOfMemory;

        MemoryBlock::set(alloc->toVirtual(addr), 0, PAGESIZE);

        // Assign to the page directory. Do not assign permission flags (only for direct sections).
        m_tables[ DIRENTRY(virt) ] = addr | PAGE1_TABLE;
        cache1_clean(&m_tables[DIRENTRY(virt)]);
        table = getSecondTable(virt, alloc);
    }
    // TODO: (re)check for MemoryAccess ?
    return table->map(virt, phys, access);
}
Example #3
MemoryContext::Result IntelPageDirectory::copy(IntelPageDirectory *directory,
                                               Address from,
                                               Address to)
{
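    // Copy the directory entries covering [from, to); on x86 each
    // entry spans 4 MB of virtual address space.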
    while (from < to)
    {
        m_tables[ DIRENTRY(from) ] = directory->m_tables[ DIRENTRY(from) ];
        from += MegaByte(4);
    }
    return MemoryContext::Success;
}
Example #4
Address MemoryServer::findFreeRange(ProcessID procID, Size size)
{
    Address *pageDir, *pageTab, vaddr, vbegin;

    /* Initialize variables. */
    vbegin  = ZERO;
    vaddr   = 1024 * 1024 * 16; /* begin the search at 16 MB */
    pageDir = PAGETABADDR_FROM(PAGETABFROM, PAGEUSERFROM);
    pageTab = PAGETABADDR_FROM(vaddr, PAGEUSERFROM);

    /* Map page tables. */
    VMCtl(procID, MapTables);

    /* Scan tables. */
    for (Size inc = PAGESIZE; DIRENTRY(vaddr) < PAGEDIR_MAX; vaddr += inc)
    {
        /* Is the hole big enough? */
        if (vbegin && vaddr - vbegin >= size)
        {
            break;
        }
        /* Increment per page table. */
        inc = PAGETAB_MAX * PAGESIZE;

        /* Try the current address. */
        if (pageDir[DIRENTRY(vaddr)] & PAGE_RESERVED)
        {
            vbegin = ZERO;
            continue;
        }
        else if (pageDir[DIRENTRY(vaddr)] & PAGE_PRESENT)
        {
            /* Look further into the page table. */
            inc     = PAGESIZE;
            pageTab = PAGETABADDR_FROM(vaddr, PAGEUSERFROM);

            if (pageTab[TABENTRY(vaddr)] & PAGE_PRESENT)
            {
                vbegin = ZERO;
                continue;
            }
        }
        /* Reset start address if needed. */
        if (!vbegin)
        {
            vbegin = vaddr;
        }
    }
    /* Clean up. */
    VMCtl(procID, UnMapTables);
    
    /* Done. */
    return vbegin;
}
Example #5
MemoryContext::Result IntelPageDirectory::translate(Address virt, Address *phys, SplitAllocator *alloc)
{
    IntelPageTable *table = getPageTable(virt, alloc);
    if (!table)
    {
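        // No page table present: the entry may map a 4 MB section directly.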
        if (m_tables[DIRENTRY(virt)] & PAGE_SECTION)
        {
            *phys = (m_tables[DIRENTRY(virt)] & PAGEMASK) + ((virt % MegaByte(4)) & PAGEMASK);
            return MemoryContext::Success;
        }
        return MemoryContext::InvalidAddress;
    }
    else
        return table->translate(virt, phys);
}
Example #6
void MemoryServer::reservePrivate(MemoryMessage *msg)
{
    Address *pageDir;
    
    /* Verify the virtual address: private reservations must lie at or above 16 MB. */
    if (msg->virtualAddress < 1024 * 1024 * 16)
    {
        msg->result = EINVAL;
        return;
    }
    /* Point to the page directory. */
    pageDir = (Address *) PAGETABADDR_FROM(PAGETABFROM,
                                           PAGEUSERFROM);
    /* Map page directory. */
    VMCtl(msg->from, MapTables);

    /* Walk the directory entries and mark each one reserved. */
    for (Address i = msg->virtualAddress;
                 i < msg->virtualAddress + msg->bytes;
                 i += (PAGESIZE * PAGETAB_MAX))
    {
        pageDir[DIRENTRY(i)] |= PAGE_RESERVED;
    }
    /* Unmap. */
    VMCtl(msg->from, UnMapTables);
    
    /* Done. */
    msg->result = ESUCCESS;
}
Example #7
MemoryContext::Result ARMFirstTable::unmap(Address virt, SplitAllocator *alloc)
{
    ARMSecondTable *table = getSecondTable(virt, alloc);
    if (!table)
    {
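        // No second-level table: the entry may be a direct 1 MB section mapping.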
        if (m_tables[DIRENTRY(virt)] & PAGE1_SECTION)
        {
            m_tables[DIRENTRY(virt)] = PAGE1_NONE;
            cache1_clean(&m_tables[DIRENTRY(virt)]);
            return MemoryContext::Success;
        }
        else
            return MemoryContext::InvalidAddress;
    }
    else
        return table->unmap(virt);
}
Example #8
MemoryContext::Result ARMFirstTable::mapLarge(Memory::Range range,
                                              SplitAllocator *alloc)
{
    // The size must be a multiple of 1 MB: each directory entry maps one section
    if (range.size & 0xfffff)
        return MemoryContext::InvalidSize;

    if ((range.phys & ~PAGEMASK) || (range.virt & ~PAGEMASK))
        return MemoryContext::InvalidAddress;

    for (Size i = 0; i < range.size; i += MegaByte(1))
    {
        if (m_tables[ DIRENTRY(range.virt + i) ] & (PAGE1_TABLE | PAGE1_SECTION))
            return MemoryContext::AlreadyExists;

        m_tables[ DIRENTRY(range.virt + i) ] = (range.phys + i) | PAGE1_SECTION | flags(range.access);
        cache1_clean(&m_tables[DIRENTRY(range.virt + i)]);
    }
    return MemoryContext::Success;
}
Example #9
IntelPageTable * IntelPageDirectory::getPageTable(Address virt, SplitAllocator *alloc)
{
    u32 entry = m_tables[ DIRENTRY(virt) ];
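    // The directory entry packs the table's physical address with flag bits.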

    // Check if the page table is present.
    if (!(entry & PAGE_PRESENT))
        return ZERO;
    else
        return (IntelPageTable *) alloc->toVirtual(entry & PAGEMASK);
}
Example #10
ARMSecondTable * ARMFirstTable::getSecondTable(Address virt, SplitAllocator *alloc)
{
    u32 entry = m_tables[ DIRENTRY(virt) ];
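    // The entry packs the table's physical address with flag bits such as PAGE1_TABLE.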

    // Check if the page table is present.
    if (!(entry & PAGE1_TABLE))
        return ZERO;
    else
        return (ARMSecondTable *) alloc->toVirtual(entry & PAGEMASK);
}
Example #11
void X86Memory::mapRemote(X86Process *p, Address pageTabAddr,
                          Address pageDirAddr, ulong prot)
{
    /* Map remote page directory and page table. */
    myPageDir[DIRENTRY(pageDirAddr)] =
        p->getPageDirectory() | (PAGE_PRESENT | PAGE_RW | PAGE_PINNED | prot);
    remPageTab = PAGETABADDR_FROM(pageTabAddr, PAGETABFROM_REMOTE);
    
    /* Refresh entire TLB cache. */
    tlb_flush_all();
}
Example #12
Address X86Memory::mapVirtual(X86Process *p, Address paddr,
                              Address vaddr, ulong prot)
{
    /* Map remote pages. */
    mapRemote(p, vaddr);

    /* Virtual address specified? */
    if (vaddr == ZERO)
    {
        vaddr = findFree(PAGETABFROM_REMOTE, remPageDir);
    }
    /* Repoint to the correct (remote) page table. */
    remPageTab = PAGETABADDR_FROM(vaddr, PAGETABFROM_REMOTE);

    /* Does the remote process have the page table in memory? */
    if (!(remPageDir[DIRENTRY(vaddr)] & PAGE_PRESENT))
    {
        /* Nope, allocate a page table first. */
        Address newPageTab  = memory->allocatePhysical(PAGESIZE);
        newPageTab |= PAGE_PRESENT | PAGE_RW | prot;

        /* Map the new page table into remote memory. */
        remPageDir[DIRENTRY(vaddr)] = newPageTab;

        /* Update caches. */
        tlb_flush(remPageTab);

        /* Zero the new page table. */
        memset(remPageTab, 0, PAGESIZE);
    }
    /* Map physical address to remote virtual address. */
    remPageTab[TABENTRY(vaddr)] = (paddr & PAGEMASK) | prot;
    tlb_flush(vaddr);

    /* Success. */
    return (Address) vaddr;
}
Example #13
Address X86Memory::lookupVirtual(X86Process *p, Address vaddr)
{
    Address ret = ZERO;

    /* Map remote page tables. */
    mapRemote(p, vaddr);
    
    /* Lookup the address, if mapped. */
    if (remPageDir[DIRENTRY(vaddr)] & PAGE_PRESENT &&
        remPageTab[TABENTRY(vaddr)] & PAGE_PRESENT)
    {
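        /* Return the raw table entry: frame address plus flag bits. */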
        ret = remPageTab[TABENTRY(vaddr)];
    }
    return ret;
}
Example #14
Address X86Memory::findFree(Address pageTabFrom, Address *pageDirPtr)
{
    Address  vaddr = 0xa0000000;
    Address *pageTabPtr = PAGETABADDR_FROM(vaddr, pageTabFrom);

    /* Find a free virtual address. */
    while (pageDirPtr[DIRENTRY(vaddr)] & PAGE_PRESENT &&
           pageTabPtr[TABENTRY(vaddr)] & PAGE_PRESENT)
    {
        /* Look for the next page in line. */
        vaddr     += PAGESIZE;
        pageTabPtr = PAGETABADDR_FROM(vaddr, pageTabFrom);
    }
    return vaddr;
}
Example #15
bool X86Memory::access(X86Process *p, Address vaddr, Size sz, ulong prot)
{
    Size bytes = 0;
    Address vfrom = vaddr;

    /* Map remote pages. */
    mapRemote(p, vaddr);

    /* Verify protection bits for each page in the range. */
    while (bytes < sz &&
           remPageDir[DIRENTRY(vaddr)] & prot &&
           remPageTab[TABENTRY(vaddr)] & prot)
    {
        /* Count the bytes from vfrom up to the end of its page. */
        vaddr += PAGESIZE;
        bytes += ((vfrom & PAGEMASK) + PAGESIZE) - vfrom;
        vfrom  = vaddr & PAGEMASK;
        remPageTab = PAGETABADDR_FROM(vaddr, PAGETABFROM_REMOTE);
    }
    /* Do we have a match? */
    return (bytes >= sz);
}
Example #16
MemoryContext::Result IntelPageDirectory::map(Address virt,
                                              Address phys,
                                              Memory::Access access,
                                              SplitAllocator *alloc)
{
    IntelPageTable *table = getPageTable(virt, alloc);
    Address addr;

    // Check if the page table is present.
    if (!table)
    {
        // Allocate a new page table
        if (alloc->allocateLow(sizeof(IntelPageTable), &addr) != Allocator::Success)
            return MemoryContext::OutOfMemory;

        MemoryBlock::set(alloc->toVirtual(addr), 0, sizeof(IntelPageTable));

        // Assign to the page directory
        m_tables[ DIRENTRY(virt) ] = addr | PAGE_PRESENT | PAGE_WRITE | flags(access);
        table = getPageTable(virt, alloc);
    }
    return table->map(virt, phys, access);
}
Example #17
X86Process::X86Process(Address entry) : Process(entry)
{
    Address *pageDir, *tmpStack, *ioMap;
    CPUState *regs;

    /* Allocate page directory. */
    pageDirAddr = memory->allocatePhysical(PAGESIZE);
    pageDir     = (Address *) memory->mapVirtual(pageDirAddr);

    /* One page for the I/O bitmap. */
    ioMapAddr   = memory->allocatePhysical(PAGESIZE);
    ioMap       = (Address *) memory->mapVirtual(ioMapAddr);

    /* Clear them first. */
    memset(pageDir,   0, PAGESIZE);
    memset(ioMap,  0xff, PAGESIZE);

    /* Setup mappings. */
    pageDir[0] = kernelPageDir[0];
    pageDir[DIRENTRY(PAGETABFROM) ] = pageDirAddr | PAGE_PRESENT | PAGE_RW;
    pageDir[DIRENTRY(PAGEUSERFROM)] = pageDirAddr | PAGE_PRESENT | PAGE_RW;

    /* Point stacks. */
    stackAddr       = 0xc0000000 - MEMALIGN;
    kernelStackAddr = 0xd0000000 - MEMALIGN;

    /* Allocate four pages for each stack. */
    for (int i = 0; i < 4; i++)
    {
        memory->allocateVirtual(this, stackAddr - (i * PAGESIZE),
                                PAGE_PRESENT | PAGE_USER | PAGE_RW);
        memory->allocateVirtual(this, kernelStackAddr - (i * PAGESIZE),
                                PAGE_PRESENT | PAGE_RW);
    }
    /* Map kernel stack. */
    tmpStack = (Address *) memory->mapVirtual(
                   memory->lookupVirtual(this, kernelStackAddr) & PAGEMASK);

    /* Setup initial registers. */
    regs = (CPUState *) (((u32)tmpStack) + PAGESIZE - sizeof(CPUState));
    memset(regs, 0, sizeof(CPUState));
    regs->ss0    = KERNEL_DS_SEL;
    regs->fs     = USER_DS_SEL;
    regs->gs     = USER_DS_SEL;
    regs->es     = USER_DS_SEL;
    regs->ds     = USER_DS_SEL;
    regs->ebp    = stackAddr;
    regs->esp0   = kernelStackAddr;
    regs->eip    = entry;
    regs->cs     = USER_CS_SEL;
    regs->eflags = 0x202;
    regs->esp3   = stackAddr;
    regs->ss3    = USER_DS_SEL;
    
    /* Repoint our stack. */
    stackAddr = kernelStackAddr - sizeof(CPUState) + MEMALIGN;

    /* Release temporary mappings. */
    memory->mapVirtual((Address) 0, (Address) pageDir, 0);
    memory->mapVirtual((Address) 0, (Address) tmpStack, 0);
    memory->mapVirtual((Address) 0, (Address) ioMap, 0);
}
Example #18
IntelProcess::IntelProcess(ProcessID id, Address entry, bool privileged)
    : Process(id, entry, privileged)
{
    Address stack, stackBase, *pageDir;
    BitAllocator *memory = Kernel::instance->getMemory();
    CPUState *regs;
    Arch::Memory local(0, memory);
    Arch::Memory::Range range;
    Size dirSize = PAGESIZE;
    u16 dataSel = privileged ? KERNEL_DS_SEL : USER_DS_SEL;
    u16 codeSel = privileged ? KERNEL_CS_SEL : USER_CS_SEL;

    // Allocate and map page directory
    memory->allocate(&dirSize, &m_pageDirectory);

    pageDir = (Address *) local.findFree(PAGESIZE, Memory::KernelPrivate);
    local.map(m_pageDirectory, (Address) pageDir,
              Arch::Memory::Present |
              Arch::Memory::Readable |
              Arch::Memory::Writable);

    // Initialize page directory
    for (Size i = 0; i < PAGEDIR_MAX; i++)
        pageDir[i] = 0;

    pageDir[0] = kernelPageDir[0];
    // TODO: this should not be done here. Try to use libarch's Memory class.
    pageDir[DIRENTRY(PAGEDIR_LOCAL) ] = m_pageDirectory | PAGE_PRESENT | PAGE_USER;
    local.unmap((Address)pageDir);

    // Obtain memory mappings
    // TODO: use Memory::create()
    Arch::Memory mem(m_pageDirectory, memory);

    // User stack.
    range.phys   = 0;
    range.virt   = mem.range(Memory::UserStack).virt;
    range.size   = mem.range(Memory::UserStack).size;
    range.access = Arch::Memory::Present |
                   Arch::Memory::User |
                   Arch::Memory::Readable |
                   Arch::Memory::Writable;
    mem.mapRange(&range);
    setUserStack(range.virt + range.size - MEMALIGN);

    // Kernel stack.
    range.phys   = 0;
    range.virt   = mem.range(Memory::KernelStack).virt;
    range.size   = mem.range(Memory::KernelStack).size;
    range.access = Arch::Memory::Present |
                   Arch::Memory::Writable;
    mem.mapRange(&range);
    setKernelStack(range.virt + range.size - sizeof(CPUState)
                                           - sizeof(IRQRegs0)
                                           - sizeof(CPURegs));

    // Map kernel stack
    range.virt = local.findFree(range.size, Memory::KernelPrivate);
    stack      = range.virt;
    local.mapRange(&range);
    stackBase  = stack + range.size;

    // loadCoreState: struct CPUState
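    // Pointer arithmetic: the CPUState frame sits at the very top of the mapped stack.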
    regs = (CPUState *) stackBase - 1;
    MemoryBlock::set(regs, 0, sizeof(CPUState));
    regs->seg.ss0    = KERNEL_DS_SEL;
    regs->seg.fs     = dataSel;
    regs->seg.gs     = dataSel;
    regs->seg.es     = dataSel;
    regs->seg.ds     = dataSel;
    regs->regs.ebp   = m_userStack;
    regs->regs.esp0  = m_kernelStack;
    regs->irq.eip    = entry;
    regs->irq.cs     = codeSel;
    regs->irq.eflags = INTEL_EFLAGS_DEFAULT |
                       INTEL_EFLAGS_IRQ;
    regs->irq.esp3   = m_userStack;
    regs->irq.ss3    = dataSel;

    // restoreState: iret
    IRQRegs0 *irq = (IRQRegs0 *) regs - 1;
    irq->eip = (Address) loadCoreState;
    irq->cs  = KERNEL_CS_SEL;
    irq->eflags = INTEL_EFLAGS_DEFAULT;

    // restoreState: popa
    CPURegs *pusha = (CPURegs *) irq - 1;
    MemoryBlock::set(pusha, 0, sizeof(CPURegs));
    pusha->ebp  = m_kernelStack - sizeof(CPURegs);
    pusha->esp0 = pusha->ebp;

    local.unmapRange(&range);
}
Example #19
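/* Fragment: tail of the sysctl read/write handler; "left" counts the input
 * bytes that were not consumed. */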
			left--;
		}
	}

done:
	*lenp -= left;
	file->f_pos += *lenp;
	return 0;
}

#define DIRENTRY(nam1, nam2, child)	\
	{CTL_##nam1, #nam2, NULL, 0, 0555, child }
#define DBGENTRY(nam1, nam2)	\
	{CTL_##nam1##DEBUG, #nam2 "_debug", &nam2##_debug, sizeof(int),\
	 0644, NULL, &proc_dodebug}

static ctl_table		debug_table[] = {
	DBGENTRY(RPC,  rpc),
	DBGENTRY(NFS,  nfs),
	DBGENTRY(NFSD, nfsd),
	DBGENTRY(NLM,  nlm),
	{0}
};

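/* For example, DIRENTRY(SUNRPC, sunrpc, debug_table) expands to:
 *   {CTL_SUNRPC, "sunrpc", NULL, 0, 0555, debug_table }
 */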
static ctl_table		sunrpc_table[] = {
	DIRENTRY(SUNRPC, sunrpc, debug_table),
	{0}
};

#endif