/*
 * void init_paging()
 * 	 Description: Initializes page directories, tables and control registers
 *   Inputs: none
 *   Return Value: none
 */
void init_paging()
{
	// Set up program page (physical) address array
	uint32_t i;
	for(i = 0; i < NUM_PD; i++)
	{
		PGRM_PAGE[i] = PAGE1 + i * PAGE_SIZE;
	}

	// Initialize the Page directory
	init_PD();

	// Initialize the video page table
	init_VIDEO_PT();

	// Set PSE bit in CR4
	set_PSE();

	// Set PG bit in CR0
	set_PG();

	// Set PGE bit in CR4
	set_PGE();

	INVLPG(VIDEO_MEM);
	INVLPG(KERNEL_MEM);

	//test_paging();

}
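
The control-register helpers and the INVLPG macro called above are not part of this listing. Below is a minimal sketch of what they might look like, assuming GCC-style inline assembly; the names match the calls above, but the bodies are illustrative only and each project in this listing defines its own versions. On x86, CR4.PSE is bit 4, CR4.PGE is bit 7, and CR0.PG is bit 31.

/* Illustrative sketches only -- not the project's actual definitions */
#define INVLPG(addr) asm volatile("invlpg (%0)" : : "r"((uintptr_t)(addr)) : "memory")

static inline void set_PSE(void)
{
	uint32_t cr4;
	asm volatile("mov %%cr4, %0" : "=r"(cr4));
	cr4 |= 0x00000010;	/* CR4.PSE: allow 4 MiB pages */
	asm volatile("mov %0, %%cr4" : : "r"(cr4));
}

static inline void set_PG(void)
{
	uint32_t cr0;
	asm volatile("mov %%cr0, %0" : "=r"(cr0));
	cr0 |= 0x80000000;	/* CR0.PG: enable paging */
	asm volatile("mov %0, %%cr0" : : "r"(cr0));
}

static inline void set_PGE(void)
{
	uint32_t cr4;
	asm volatile("mov %%cr4, %0" : "=r"(cr4));
	cr4 |= 0x00000080;	/* CR4.PGE: enable global pages */
	asm volatile("mov %0, %%cr4" : : "r"(cr4));
}
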
Example #2
/*
 * kmem_map_page_frame_cur(p_adr, v_adr, pages, flags)
 *
 * Maps an area of 'pages' page frames starting at physical
 * address 'p_adr' to the virtual address 'v_adr' in the
 * virtual address space defined by the currently active
 * page directory.
 *
 * You can define the access flags of these pages through
 * the 'flags' parameter.
 *
 * If a page should be mapped to an area of the virtual
 * address space that currently isn't described by a 
 * page table, the function will automatically allocate 
 * and map a new page table.
 *
 * The function will neither free page frames that are
 * removed from the address space nor change the usage
 * counter of the selected page frames.
 *
 * This function will also invalidate the TLB of the
 * selected pages.
 *
 * ! If you want to map a page into the current virtual
 * ! address space YOU HAVE TO use kmem_map_page_frame_cur
 * ! to prevent the use of stale TLB contents.
 *
 * Using the flag GENFLAG_DONT_OVERWRITE_SETTINGS prevents
 * the function from overwriting existing areas.
 *
 * Return Value:
 *	== 0	Successful
 *	> 0	n pages have been skipped
 *	< 0	Error (not enough memory to allocate a new page table)
 *
 */
long kmem_map_page_frame_cur(uintptr_t p_adr, 
		  	     uintptr_t v_adr, 
			     unsigned long pages,
			     unsigned flags
			    )
{
	/* Map the pages */
	long l__retval =
		kmem_map_page_frame(i386_current_pdir, 
				    &current_p[PRCTAB_X86_MMTABLE],
			            p_adr, 
		  		    v_adr, 
				    pages,
		  		    flags
		       		   );

	v_adr &= (~0xFFF);
	/* Invalidate the TLB entries of the mapped pages */
	while (pages --)
	{
		INVLPG(v_adr);
		v_adr += 4096;
	}

	return l__retval;
}
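
A hypothetical caller sketch for the return-value convention documented above; the physical address, virtual address, page count, and flag combination are illustrative only (GENFLAG_PRESENT and GENFLAG_DONT_OVERWRITE_SETTINGS are the flags referenced by kmem_map_page_frame further down in this listing).

/* Illustrative caller only -- addresses and flags chosen for the example */
long l__skipped = kmem_map_page_frame_cur(0x00400000,	/* physical base address */
					  0xB0000000,	/* virtual base address  */
					  16,		/* map 16 * 4 KiB pages  */
					  GENFLAG_PRESENT | GENFLAG_DONT_OVERWRITE_SETTINGS
					 );
if (l__skipped < 0)
{
	/* Error: not enough memory to allocate a new page table */
}
else if (l__skipped > 0)
{
	/* l__skipped pages were already present and have been skipped */
}
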
/*
 * uint8_t* user_vidmap()
 * 	 Description: Maps video memory for a user process
 *   Inputs: none
 *   Return Value: A pointer to the newly mapped virtual memory
 */
uint8_t* user_vidmap()
{
	// Set up PTE if necessary
	uint32_t PT_index = get_PTE_offset(USER_VID);

	if(PT[1][PT_index].present == 0)
	{
		PTE_t PTE_video;
		PTE_video = PTE_default;
		PTE_video.user_super = 1;
		PTE_video.read_write = 1;

		SET_PTE(PTE_video, VIDEO_MEM);
		PT[1][PT_index] = PTE_video;
	}

	// Set up PDE if necessary
	uint32_t PD_index = get_PDE_offset(USER_VID);

	if(cur_PD[PD_index].present == 0)
	{
		PDE_t PDE_video;
		PDE_video = PDE_default;
		PDE_video.user_super = 1;
		PDE_video.read_write = 1;

		SET_PDE_4KB_PT(PDE_video, PT[1]);
		cur_PD[PD_index] = PDE_video;
	}

	INVLPG(USER_VID);

	return (uint8_t*)USER_VID;
}
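
A hypothetical use of the pointer returned above, assuming the usual VGA text-mode layout of character/attribute byte pairs; the write itself is illustrative only.

/* Illustrative only: put a light-grey 'A' into the first text-mode cell
 * through the user-accessible mapping returned by user_vidmap() */
uint8_t* vid = user_vidmap();
vid[0] = 'A';		/* character byte */
vid[1] = 0x07;		/* attribute byte: light grey on black */
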
Example #4
kern_obj * spawn_process(const uchar * cd, ulong csz, ulong dsz, ulong bsz, kern_obj * par, uchar priv, uchar priority)
{
	ulong i, pbase, ssz, absz, pgs;
	kern_obj * proc = alloc_kobj();
	
	ssz = csz + dsz;
	absz = bsz;

	/* Round the code, data and BSS sizes up to whole 4 KiB pages */
	if (csz % 0x1000 != 0) csz = csz - (csz % 0x1000) + 0x1000;
	if (dsz % 0x1000 != 0) dsz = dsz - (dsz % 0x1000) + 0x1000;
	if (bsz % 0x1000 != 0) bsz = bsz - (bsz % 0x1000) + 0x1000;

	proc->type = KOBJ_PROCESS;
	proc->u.proc.parent = par;
	proc->u.proc.priv = priv;
	proc->u.proc.priority = priority;
	
	proc->u.proc.code_data_ws = alloc_ws();
	
	/* Allocate physical pages for code + data + BSS plus 0x200 extra pages
	 * (2 MiB, matching the +0x200000 added to the working set below) */
	pgs = csz / 0x1000 + dsz / 0x1000 + bsz / 0x1000;
	pbase = alloc_pages(pgs + 0x200, PHYS_PAGES);
	if (pbase == 0) {
		free_kobj(proc);
		return NULL;
	}

	add_pgs_to_ws(proc->u.proc.code_data_ws, USPACE_BOT, pbase, csz + dsz + bsz + 0x200000, 7);

	/* Copy the code/data image into the new pages through the KPT0_0
	 * mapping window at KSPACE_LOC, and zero the BSS that follows it */
	for (i = 0; i < ssz + absz; i++) {
		if (i % 0x1000 == 0) {
			ATQ(KPT0_0_LOC) = (pbase + i) | 3;
			INVLPG(KSPACE_LOC);
		}
		if (i < ssz) ATB(KSPACE_LOC + i % 0x1000) = cd[i];
		else ATB(KSPACE_LOC + i % 0x1000) = 0;
	}

	if (spawn_thread(proc, (int (*)())USPACE_BOT) == NULL) {
		free_kobj(proc);
		free_pages(pbase, pgs + 0x200, PHYS_PAGES);
		return NULL;
	}

	/* Insert the new process into the circular process list */
	getlock(&procthrd_lock, 0);
	if (head_process == NULL) {
		proc->u.proc.next = proc;
		head_process = proc;
	} else {
		proc->u.proc.next = head_process->u.proc.next;
		head_process->u.proc.next = proc;
	}
	unlock(&procthrd_lock, 0);
	
	return proc;
}
Example #5
File: vm.c Project: Fluray/OrzOs
void vma_freepages(vma_t *vma)
{
    if (!vma)
        return;

    u32int i;
    for (i=vma->start; i<vma->end; i+=PAGE_SIZE) {
        /*printk("free_page %p\n", i);*/
        INVLPG(i);
        free_page( get_page(i,0,current_task->dir) );
    }
}
Example #6
kern_obj * spawn_thread(kern_obj * proc, int (*func)())
{
	ulong fl, pg;
	kern_obj * thrd = alloc_kobj();

	thrd->type = KOBJ_THREAD;
	thrd->u.thrd.proc = proc;
	
	thrd->u.thrd.sub = alloc_kobj();
	thrd->u.thrd.sub->type = KOBJ_THREAD_SUB;
	thrd->u.thrd.sub->u.thrd2.stack_ws = alloc_ws();

	pg = alloc_pages(2, PHYS_PAGES);
	if (pg == 0) {
		free_kobj(thrd->u.thrd.sub);
		free_kobj(thrd);
		return NULL;
	}

	add_pgs_to_ws(thrd->u.thrd.sub->u.thrd2.stack_ws, USPACE_TOP - 0x2000, pg, 0x2000, 7);
	thrd->u.thrd.sub->u.thrd2.rsp = USPACE_TOP - 0x50;
	
	/* Map the top page of the new stack at KSPACE_LOC so it can be written */
	ATQ(KPT0_0_LOC) = (pg + 0x1000) | 3;
	INVLPG(KSPACE_LOC);

	/* Build an iret frame (RIP, CS, RFLAGS, RSP, SS) at the top of the stack */
	ATQ(KSPACE_LOC + 0xFB0) = (ulong)func;
	ATQ(KSPACE_LOC + 0xFB8) = USER_CS | 3;
	GET_FLAGS(fl);
	ATQ(KSPACE_LOC + 0xFC0) = fl;
	ATQ(KSPACE_LOC + 0xFC8) = USPACE_TOP - 0x1000;
	ATQ(KSPACE_LOC + 0xFD0) = USER_DS | 3;

	getlock(&procthrd_lock, 1);
	if (head_thread == NULL) {
		thrd->u.thrd.next = thrd;
		head_thread = thrd;
	} else {
		thrd->u.thrd.next = head_thread->u.thrd.next;
		head_thread->u.thrd.next = thrd;
	}
	unlock(&procthrd_lock, 1);

	return thrd;
}
Example #7
void swap_ws(kern_obj * ws, int wsi)
{
	kern_obj * old;
	uint i;
	
	old = cur_ws[wsi];
	cur_ws[wsi] = ws;
	
	/* Tear down the old working set's mappings and flush their TLB entries */
	while (old != NULL) {
		for (i = 0; i < old->u.ws.size; i += 0x1000) {
			set_pt_entry(old->u.ws.vbase + i, 0, old->u.ws.flags);
			INVLPG(old->u.ws.vbase + i);
		}
		old = old->u.ws.next;
	}

	/* Map in the pages of the new working set */
	while (ws != NULL) {
		page_in(ws->u.ws.pbase, ws->u.ws.vbase, ws->u.ws.size, ws->u.ws.flags);		
		ws = ws->u.ws.next;
	}
}
Example #8
/*
 * kmem_map_page_frame(pdir, pstat, p_adr, v_adr, pages, flags)
 *
 * Maps an area of 'pages' page frames starting at physical
 * address 'p_adr' to the virtual address 'v_adr' in the
 * virtual address space defined by the page directory 'pdir'.
 * The function will update the page table usage statistics
 * array 'pstat'.
 *
 * You can define the access flags of these pages through
 * the 'flags' parameter.
 *
 * If a page should be mapped to an area of the virtual
 * address space that currently isn't described by a 
 * page table, the function will automatically allocate 
 * and map a new page table.
 *
 * The function will neither free page frames that are
 * removed from the address space nor change the usage
 * counter of the selected page frames.
 *
 * ! If you want to map a page into the current virtual
 * ! address space YOU HAVE TO use kmem_map_page_frame_cur
 * ! to prevent the use of an invalid TLB content.
 *
 * Using the flag GENFLAG_DONT_OVERWRITE_SETTINGS prevents
 * the function from overwriting existing areas.
 * 
 * Return Value:
 *	== 0	Successful
 *	> 0	n pages have been skipped
 *	< 0	Error (not enough memory to allocate a new page table)
 *
 */
long kmem_map_page_frame(uint32_t *pdir, 
			uint32_t *pstat,
		  	uintptr_t p_adr, 
		  	uintptr_t v_adr, 
			unsigned long pages,
		  	unsigned flags
		       )
{
	uint32_t *l__tab = NULL;
	unsigned long l__lasttab = 0; 
	long l__retval = 0;
		
	/* The page offset isn't interesting */
	v_adr &= (~0xFFFU);
	p_adr &= (~0xFFFU);
	
	/* Change the page descriptors */
	while(pages --)
	{
		unsigned long l__offs = (v_adr / 4096) & 0x3ff;
		int l__do_invlpg = 0;
		
		/* Is there a need of selecting a new page table ? */
		if (   ((v_adr / (4096 * 1024)) != l__lasttab) 
		    || (l__tab == NULL)
		   )
		{
			l__tab = kmem_get_table(pdir, v_adr, true);
			if (l__tab == NULL) return -1;
			l__lasttab = v_adr / (4096 * 1024);
		}
		
		if (    (flags & GENFLAG_DONT_OVERWRITE_SETTINGS)
		     && (l__tab[l__offs] & GENFLAG_PRESENT)
		   )
		{
			l__retval ++;
			continue;	
		}

		/* Update usage status (page descriptor used) */
		if (    (flags & GENFLAG_PRESENT)
		     && (v_adr < VAS_USER_END)
		     && (!(l__tab[l__offs] & GENFLAG_PRESENT))
		   )
		{
			pstat[v_adr / (4096 * 1024)] ++;
		}
		
		if (    (l__tab[l__offs] & GENFLAG_PRESENT)
		     && (!(flags & GENFLAG_PRESENT))
		     && (v_adr < VAS_USER_END)
		   )
		{
			pstat[v_adr / (4096 * 1024)] --;
		}
		
		/* Do we need to INVLPG? */
		if ((l__tab[l__offs] & GENFLAG_GLOBAL) || (flags & GENFLAG_GLOBAL)) l__do_invlpg = 1;
		
		/* Write the page descriptor */
		l__tab[l__offs] =   (p_adr & (~0xFFFU))		
		                  | (flags & 0xFFFU);
		
		/* Invalidate TLB, if needed */
		if (l__do_invlpg) INVLPG(v_adr);
				  
		/* Increment the addresses */
		p_adr += 4096;
		v_adr += 4096;
	}
	
	return l__retval;
}
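
As the usage-counter bookkeeping above shows, a call whose 'flags' lack GENFLAG_PRESENT writes non-present descriptors and decrements 'pstat' for the affected page table. A hypothetical call that clears 16 pages in an address space that is not currently active; 'child_pdir' and 'child_pstat' are invented names for this example.

/* Illustrative only: unmap 16 pages from a non-current address space.
 * 'child_pdir' and 'child_pstat' are hypothetical. */
long l__skipped = kmem_map_page_frame(child_pdir,
				      child_pstat,
				      0,		/* physical base; with flags == 0 the descriptors become 0 */
				      0x40000000,	/* virtual base address (example) */
				      16,		/* 16 * 4 KiB pages */
				      0			/* no GENFLAG_PRESENT: clear the mappings */
				     );

Because the target directory is not the active one, the plain function is sufficient here; for the currently active directory the kmem_map_page_frame_cur wrapper shown earlier must be used so the affected TLB entries are flushed.
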
Example #9
/* Write 'val' (physical address | flags) into the KPT0_0 scratch page-table
 * entry and flush the KSPACE_LOC TLB entry that it backs */
static void page_to_kpt0_0(ulong val)
{
	ATQ(KPT0_0_LOC) = val;
	INVLPG(KSPACE_LOC);
}
Example #10
/**
 * \brief Clone a page from an entry
 * \param Ent	Pointer to the entry in the PML4/PDP/PD/PT
 * \param NextLevel	Pointer to the contents of the entry
 * \param Addr	Destination address
 * \param bTable	Non-zero if the entry references another paging structure rather than a data page
 * \note Used in COW
 */
void MM_int_ClonePageEnt( Uint64 *Ent, void *NextLevel, tVAddr Addr, int bTable )
{
	tPAddr	curpage = *Ent & PADDR_MASK; 
	 int	bCopied = 0;
	
	if( MM_GetRefCount( curpage ) <= 0 ) {
		Log_KernelPanic("MMVirt", "Page %P still marked COW, but unreferenced", curpage);
	}
	if( MM_GetRefCount( curpage ) == 1 )
	{
		*Ent &= ~PF_COW;
		*Ent |= PF_PRESENT|PF_WRITE;
		#if TRACE_COW
		Log_Debug("MMVirt", "COW ent at %p (%p) only %P", Ent, NextLevel, curpage);
		#endif
	}
	else
	{
		void	*tmp;
		tPAddr	paddr;
		
		if( !(paddr = MM_AllocPhys()) ) {
			Threads_SegFault(Addr);
			return ;
		}

		ASSERT(paddr != curpage);
			
		tmp = MM_MapTemp(paddr);
		memcpy( tmp, NextLevel, 0x1000 );
		MM_FreeTemp( tmp );
		
		#if TRACE_COW
		Log_Debug("MMVirt", "COW ent at %p (%p) from %P to %P", Ent, NextLevel, curpage, paddr);
		#endif

		MM_DerefPhys( curpage );
		*Ent &= PF_USER;
		*Ent |= paddr|PF_PRESENT|PF_WRITE;
		
		bCopied = 1;
	}
	INVLPG( (tVAddr)NextLevel );
	
	// Mark COW on contents if it's a PDPT, Dir or Table
	if(bTable) 
	{
		Uint64	*dp = NextLevel;
		 int	i;
		for( i = 0; i < 512; i ++ )
		{
			if( !(dp[i] & PF_PRESENT) )
				continue;
			
			if( bCopied )
				MM_RefPhys( dp[i] & PADDR_MASK );
			if( dp[i] & PF_WRITE ) {
				dp[i] &= ~PF_WRITE;
				dp[i] |= PF_COW;
			}
		}
	}
}
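
Per the note above, MM_int_ClonePageEnt is used on copy-on-write faults. A hypothetical caller sketch follows; the project's real page-fault handler is not shown in this listing, and GetPTEPointer is an invented lookup helper.

/* Illustrative sketch only -- not the project's actual page-fault handler.
 * 'GetPTEPointer' is a hypothetical helper that returns the address of the
 * page-table entry mapping 'Addr'. */
void Example_CoWWriteFault(tVAddr Addr)
{
	Uint64	*ent = GetPTEPointer(Addr);	/* hypothetical */
	if( ent && (*ent & PF_COW) )
	{
		/* For a leaf entry, NextLevel is the page's current mapping
		 * and bTable is 0 because it maps data, not another table */
		MM_int_ClonePageEnt( ent, (void*)(Addr & ~0xFFFULL), Addr, 0 );
	}
}
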