//=========================================================
// Copy the given page and all consecutive subsequent ones
//=========================================================
void CopyPages(long address, struct Task *task)
{
	address &= PageMask;

	// Get physical addresses of the new and current page-table roots
	struct PML4 *pml4 = (struct PML4 *) (task->cr3);
	struct PML4 *current_pml4 = (struct PML4 *) (currentTask->cr3);

	while (1)
	{
		struct PT *pt = GetPT(pml4, address, task->pid);
		struct PT *currentPT = GetPT(current_pml4, address, currentTask->pid);
		int i = GetPTIndex(address);

		// Stop at the first page that is not mapped in the current task
		if (!(VIRT(PT, currentPT)->entries[i].value))
			break;

		// Allocate a fresh frame, map it in the new task, and copy the
		// physical page contents through the direct mapping at VAddr
		p_Address data = AllocPage(task->pid);
		//data += VAddr / 8;
		CreatePTEWithPT((struct PML4 *) task->cr3, data, address,
				task->pid, US | RW | P | 0x800);
		memcpy((void *) PAGE(VIRT(PT, pt)->entries[i].value) + VAddr,
		       (void *) PAGE(VIRT(PT, currentPT)->entries[i].value) + VAddr,
		       PageSize);
		address += PageSize;
	}
}
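CopyPages and the GetXX walkers below lean on helper macros whose headers are not shown: a direct mapping of physical memory at virtual offset VAddr, plus PAGE()/PageMask to strip PTE flag bits. A plausible reconstruction, for orientation only; every name and value here is an assumption, not the project's real header (note that other snippets in this section come from codebases with a one-argument VIRT):

// Hedged reconstruction of the helpers assumed above; illustrative only.
#define PageSize  0x1000UL                 // 4 KiB pages
#define PageMask  (~(PageSize - 1))        // clears the page-offset bits
#define PAGE(e)   ((e) & PageMask)         // page frame base from a PTE value
#define VIRT(type, paddr) \
	((struct type *)((long)(paddr) + VAddr))  // phys -> direct-map virtual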
static void virtual_init(struct proc *target, struct proc *source)
{
	/* Create pml4 */
	target->cr3 = page_phys_addr(get_zeroed_page(ZONE_1GB));

	/* For now, only map the kernel 512 Gbyte segment */
	*((struct pml4e *)VIRT(target->cr3) + pml4_index(KERN_PAGE_OFFSET)) =
		*((struct pml4e *)VIRT(source->cr3) + pml4_index(KERN_PAGE_OFFSET));
}
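The pml4_index() helper is not shown. Given the standard x86-64 layout, where bits 39-47 of a virtual address select the PML4 slot, and the matching `>> 39 & 0x1FF` shift GetPD uses below, a likely sketch (an assumption, not the project's definition):

/* Sketch only: bits 39-47 of a canonical x86-64 virtual address
 * select the PML4 entry. */
static inline int pml4_index(unsigned long vaddr)
{
	return (vaddr >> 39) & 0x1ff;
}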
//=========================================================================
// Return the physical address of the PDP corresponding to address lAddress
//=========================================================================
struct PDP *GetPDP(struct PML4 *pml4, long lAddress, unsigned short pid)
{
	int pml4Index = GetPML4Index(lAddress);
	long pdp = VIRT(PML4, pml4)->entries[pml4Index].value & 0xFFFFF000;

	if (!pdp)
	{
		long newpage = (long) AllocPage(pid);
		VIRT(PML4, pml4)->entries[pml4Index].value = newpage | P | RW | US;
		pdp = newpage;
	}
	return (struct PDP *) (pdp & 0xFFFFF000);
}
struct dma_addr sos_dma_malloc(void *cookie, uint32_t size, int cached)
{
	static int alloc_cached = 0;
	struct dma_addr dma_mem;
	(void) cookie;

	assert(_dma_pstart);
	_dma_pnext = DMA_ALIGN(_dma_pnext);
	if (_dma_pnext < _dma_pend) {
		/* If the caching policy has changed, round up to a page boundary */
		if (alloc_cached != cached && PAGE_OFFSET(_dma_pnext) != 0) {
			_dma_pnext = ROUND_UP(_dma_pnext, seL4_PageBits);
		}
		alloc_cached = cached;

		/* We no longer need dma_fill, since the root server does this
		 * for us if we fault */
		dma_mem.phys = (eth_paddr_t) _dma_pnext;
		dma_mem.virt = (eth_vaddr_t) VIRT(dma_mem.phys);
		_dma_pnext += size;
	} else {
		dma_mem.phys = 0;
		dma_mem.virt = 0;
	}
	return dma_mem;
}
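The alignment helpers here are also not shown. Since ROUND_UP is called with seL4_PageBits, its second argument is evidently a bit count rather than a byte count; a plausible reading (all of these are assumptions, including the DMA alignment width):

/* Assumed reconstructions -- the project's real headers may differ.
 * `bits` names a power-of-two alignment expressed as a bit count. */
#define ROUND_UP(n, bits)  (((((n) - 1UL) >> (bits)) + 1UL) << (bits))
#define PAGE_OFFSET(a)     ((a) & ((1UL << seL4_PageBits) - 1))
#define DMA_ALIGN_BITS     5                  /* assumed: 32-byte alignment */
#define DMA_ALIGN(a)       ROUND_UP((a), DMA_ALIGN_BITS)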
static unsigned long alloc_vm_pmem ( unsigned long size )
{
	/* alignment for a 2 MB page table */
	const unsigned long align = 1 << ( PAGE_SHIFT_2MB - PAGE_SHIFT );
	const unsigned long pfn = alloc_pages ( size >> PAGE_SHIFT, align );

	return ( unsigned long ) VIRT ( pfn << PAGE_SHIFT );
}
static unsigned long create_intercept_table ( unsigned long size )
{
	const unsigned long pfn = alloc_pages ( size >> PAGE_SHIFT, 1 );
	void *p = ( void * ) VIRT ( pfn << PAGE_SHIFT );

	/* Setting every bit intercepts all accesses (vol. 2, p. 445) */
	memset ( p, 0xff, size );

	return pfn << PAGE_SHIFT;
}
static struct vmcb * alloc_vmcb ( void )
{
	struct vmcb *vmcb;
	const unsigned long pfn = alloc_pages ( 1, 1 );

	vmcb = ( struct vmcb * ) VIRT ( pfn << PAGE_SHIFT );
	memset ( ( char * ) vmcb, 0, sizeof ( struct vmcb ) );

	return vmcb;
}
//=====================================================
// Create a Page Table for a new process
// Return a pointer to the Page Directory of this table
//=====================================================
void * VCreatePageDir(unsigned short pid, unsigned short parentPid)
{
	struct PML4 *pml4;
	struct PD *pd;

	// Allocate the base page for the Page Table
	pml4 = (struct PML4 *) AllocPage(pid);
	VIRT(PML4, pml4)->entries[GetPML4Index(VAddr)].value =
		virtualPDP | P | RW;		// Physical to virtual addresses
	pd = GetPD(pml4, 0, pid);
	VIRT(PD, pd)->entries[GetPDIndex(0)].value =
		kernelPT | P | RW;		// Kernel entries

	if (parentPid == 0)
	// Just create some default PTEs
	// We need these two entries so that NewKernelTask can
	// access the data and stack pages of the new process.
	{
		long c;
		struct PT *pt = GetPT(pml4, UserData, pid);
		VIRT(PT, pt)->entries[GetPTIndex(UserData)].value =
			AllocAndCreatePTE(TempUserData, pid, RW | P);
		c = TempUserData;
		asm ("invlpg %0;" : : "m"(*(char *)TempUserData));
		pt = GetPT(pml4, KernelStack, pid);
		VIRT(PT, pt)->entries[GetPTIndex(KernelStack)].value =
			AllocAndCreatePTE(TempUStack, pid, RW | P);
		pt = GetPT(pml4, UserStack, pid);
		VIRT(PT, pt)->entries[GetPTIndex(UserStack)].value =
			AllocAndCreatePTE(TempUStack, pid, RW | P);
		c = TempUStack;
		asm ("invlpg %0;" : : "m"(*(char *)TempUStack));
	}
	return (void *) pml4;	// assumed return: the new PML4 root (the CR3
				// value); the excerpt stops before this point
}
//=========================================================================
// Return the physical address of the PT corresponding to address lAddress
//=========================================================================
struct PT *GetPT(struct PML4 *pml4, long lAddress, unsigned short pid)
{
	int pdIndex = GetPDIndex(lAddress);
	int pdpIndex = GetPDPIndex(lAddress);
	int pml4Index = GetPML4Index(lAddress);
	long pdp, pd, pt;

	pdp = VIRT(PML4, pml4)->entries[pml4Index].value & 0xFFFFF000;
	if (!pdp)
	{
		long newpage = (long) AllocPage(pid);
		VIRT(PML4, pml4)->entries[pml4Index].value = newpage | P | RW | US;
		pdp = newpage;
	}
	pd = VIRT(PDP, pdp)->entries[pdpIndex].value & 0xFFFFF000;
	if (!pd)
	{
		long newpage = (long) AllocPage(pid);
		VIRT(PDP, pdp)->entries[pdpIndex].value = newpage | P | RW | US;
		pd = newpage;
	}
	pt = VIRT(PD, pd)->entries[pdIndex].value & 0xFFFFF000;
	if (!pt)
	{
		long newpage = (long) AllocPage(pid);
		VIRT(PD, pd)->entries[pdIndex].value = newpage | P | RW | US;
		pt = newpage;
	}
	return (struct PT *) (pt & 0xFFFFF000);
}
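Because GetPT allocates any missing intermediate tables on demand, combining it with GetPTIndex gives the map-one-page pattern seen in CopyPages and VCreatePageDir. A minimal illustration (MapOnePage is a hypothetical helper, not part of the original code):

// Hypothetical helper for illustration: install a single 4 KiB mapping
// using the on-demand walk above.
void MapOnePage(struct PML4 *pml4, long vAddress, long pAddress,
		unsigned short pid)
{
	struct PT *pt = GetPT(pml4, vAddress, pid); // allocates missing levels
	VIRT(PT, pt)->entries[GetPTIndex(vAddress)].value = pAddress | P | RW | US;
}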
//=========================================================================
// Return the physical address of the PD corresponding to address lAddress
//=========================================================================
struct PD *GetPD(struct PML4 *pml4, long lAddress, unsigned short pid)
{
	int pdpIndex = lAddress >> 30 & 0x1FF;
	int pml4Index = lAddress >> 39 & 0x1FF;
	long pdp, pd;

	pdp = VIRT(PML4, pml4)->entries[pml4Index].value & 0xFFFFF000;
	if (!pdp)
	{
		long newpage = (long) AllocPage(pid);
		VIRT(PML4, pml4)->entries[pml4Index].value = newpage | P | RW | US;
		pdp = newpage;
	}
	pd = VIRT(PDP, pdp)->entries[pdpIndex].value & 0xFFFFF000;
	if (!pd)
	{
		long newpage = (long) AllocPage(pid);
		VIRT(PDP, pdp)->entries[pdpIndex].value = newpage | P | RW | US;
		pd = newpage;
	}
	return (struct PD *) (pd & 0xFFFFF000);
}
/* Create a page table that maps the VM's physical addresses to the PM's
 * physical addresses and return the (PM's) physical base address of the
 * table. */
static unsigned long create_vm_pmem_mapping_table ( unsigned long vm_pmem_start,
						    unsigned long vm_pmem_size )
{
	const unsigned long cr3 = pml4_table_create ( );
	const unsigned long pml4 = ( unsigned long ) VIRT ( cr3 );
	const unsigned long vm_pmem_pfn = PFN_DOWN_2MB ( PHYS ( vm_pmem_start ) );
	int i;

	for ( i = 0; i < PFN_UP_2MB ( vm_pmem_size ); i++ ) {
		const unsigned long vm_paddr = i << PAGE_SHIFT_2MB;
		/* for VGA (too naive) */
		const unsigned long pm_pfn = i + ( is_reserved_pmem ( i ) ? 0 : vm_pmem_pfn );
		const unsigned long pm_paddr = pm_pfn << PAGE_SHIFT_2MB;
		mmap ( pml4, vm_paddr, pm_paddr, 1 /* is_user */ );
	}

	printf ( "Page table for nested paging created.\n" );

	return cr3;
}
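The mmap() helper this calls is not shown. A minimal sketch of what a 2 MB nested-paging mapper could look like, reusing alloc_pages/VIRT from the snippets above; the flag constants and next_table() are illustrative assumptions, not the project's real API:

/* Sketch only: flag names and next_table() are assumptions. */
enum { PTE_P = 1 << 0, PTE_RW = 1 << 1, PTE_US = 1 << 2, PTE_PS = 1 << 7 };

/* Return a virtual pointer to the next-level table, allocating and
 * zeroing one if the entry is empty. */
static unsigned long *next_table ( unsigned long *table, int index )
{
	if ( ! ( table[index] & PTE_P ) ) {
		const unsigned long pfn = alloc_pages ( 1, 1 );
		memset ( ( void * ) VIRT ( pfn << PAGE_SHIFT ), 0, 1 << PAGE_SHIFT );
		table[index] = ( pfn << PAGE_SHIFT ) | PTE_P | PTE_RW | PTE_US;
	}
	return ( unsigned long * ) VIRT ( table[index] & ~0xfffUL );
}

static void mmap ( unsigned long pml4, unsigned long vm_paddr,
		   unsigned long pm_paddr, int is_user )
{
	unsigned long *pdp = next_table ( ( unsigned long * ) pml4,
					  vm_paddr >> 39 & 0x1ff );
	unsigned long *pd = next_table ( pdp, vm_paddr >> 30 & 0x1ff );

	/* PS set at the PD level selects a 2 MB page */
	pd[vm_paddr >> 21 & 0x1ff] =
		( pm_paddr & ~( ( 1UL << PAGE_SHIFT_2MB ) - 1 ) )
		| PTE_P | PTE_RW | PTE_PS | ( is_user ? PTE_US : 0 );
}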