Example no. 1
/*
 *	When the swapper has found a task it wishes to make run, we do the
 *	dirty work. On failure we return -1.
 */
static int make_runnable(register struct task_struct *t)
{
    char flags = t->mm.flags;

    if (flags & CS_SWAP)
	if (swap_in(t->mm.cseg, 1) == -1)
	    return -1;
    if (flags & DS_SWAP)
	if (swap_in(t->mm.dseg, 0) == -1)
	    return -1;
    return 0;
}
Example no. 2
void page_replacement(void *vaddr)
{
  uint32_t *paddr,*lru_page;
  
  if ((paddr = palloc_get_page(PAL_USER)) != NULL)
    swap_in(vaddr, paddr);
  else
  {
    lru_page = lru_get_page();

    swap_out(lru_page);
    swap_in(vaddr,lru_page);
  }
}
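For reference, a minimal user-space sketch of the allocate-or-evict pattern used in Example no. 2 above. The fixed-size frame pool, the timestamp-based LRU pick, and the backing_store array are hypothetical stand-ins for illustration, not part of the quoted project:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NFRAMES   4
#define PAGESIZE  64
#define NPAGES    16

static uint8_t  frames[NFRAMES][PAGESIZE];       /* "physical" frames            */
static uint8_t  backing_store[NPAGES][PAGESIZE]; /* fake swap area               */
static int      frame_owner[NFRAMES];            /* page held by frame, -1 = free */
static unsigned last_used[NFRAMES];              /* timestamp used for LRU        */
static unsigned clock_ticks;

/* Copy a page from the fake swap area into a frame. */
static void swap_in(int page, int frame)
{
    memcpy(frames[frame], backing_store[page], PAGESIZE);
    frame_owner[frame] = page;
    last_used[frame] = ++clock_ticks;
}

/* Copy a frame back to the fake swap area and mark it free. */
static void swap_out(int frame)
{
    if (frame_owner[frame] >= 0)
        memcpy(backing_store[frame_owner[frame]], frames[frame], PAGESIZE);
    frame_owner[frame] = -1;
}

/* Allocate-or-evict, mirroring the shape of page_replacement() above. */
static void page_replacement(int page)
{
    int frame = -1, lru = 0;

    for (int i = 0; i < NFRAMES; i++)            /* look for a free frame  */
        if (frame_owner[i] < 0) { frame = i; break; }

    if (frame < 0) {                             /* none free: evict LRU   */
        for (int i = 1; i < NFRAMES; i++)
            if (last_used[i] < last_used[lru]) lru = i;
        swap_out(lru);
        frame = lru;
    }
    swap_in(page, frame);
}

int main(void)
{
    memset(frame_owner, -1, sizeof frame_owner);
    for (int p = 0; p < NPAGES; p++)             /* touch more pages than frames */
        page_replacement(p % 7);
    for (int i = 0; i < NFRAMES; i++)
        printf("frame %d holds page %d\n", i, frame_owner[i]);
    return 0;
}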
Example no. 3
int page_fault(page_table_leaf_t *page_entry) {
    frame_t victim = 0;

    stats.page_faults++;

    /*
     * If there are no free frames, then we need to swap something out
     */
    if( next_free_frame < 0 ) {
        /*
         * Pick a victim to swap out
         */
        page_replacement_alg(&victim);

        /*
         * Swap out the page
         */
        swap_out(victim);
    }

    /*
     * Swap-In the page
     */
    swap_in(page_entry);

    return 0;
}
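Example no. 3 leaves page_replacement_alg() opaque. One common choice is the clock (second-chance) algorithm; the sketch below is a generic illustration in which frame_t, NFRAMES and the referenced[] bit array are assumptions, not the quoted project's actual definitions:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NFRAMES 8
typedef uint32_t frame_t;

static bool referenced[NFRAMES];   /* reference bit per frame, set on access */
static unsigned clock_hand;        /* next frame the clock hand inspects     */

/* Pick a victim frame: sweep the frames, clearing reference bits as we go
 * (the "second chance"), and stop at the first frame whose bit is clear. */
static void page_replacement_alg(frame_t *victim)
{
    for (;;) {
        if (!referenced[clock_hand]) {
            *victim = clock_hand;
            clock_hand = (clock_hand + 1) % NFRAMES;
            return;
        }
        referenced[clock_hand] = false;          /* give it a second chance */
        clock_hand = (clock_hand + 1) % NFRAMES;
    }
}

int main(void)
{
    frame_t victim;
    referenced[0] = referenced[1] = referenced[2] = true;  /* recently used */
    page_replacement_alg(&victim);
    printf("victim frame: %u\n", (unsigned) victim);       /* prints 3      */
    return 0;
}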
Example no. 4
void do_pagefault(struct pf_msg *pmsg)
{
   int frame;
   int page;

   verify_vm_access(pmsg->page_idx, pmsg->word_idx);

   frame = find_pgt_index();
   page = find_vm_index(frame);

   lock_memory();
   lock_vm();
   lock_pagetable();

   //printf("Swapping out frame %d\n", frame);

   swap_out(page, frame);
   usleep(F);

   //printf("Swapping in page %d\n", pmsg->page_idx);
   swap_in(pmsg->page_idx, frame);
   usleep(F);

   page_table[page] = -1;
   page_table[pmsg->page_idx] = frame;

   unlock_memory();
   unlock_vm();
   unlock_pagetable();
}
Example no. 5
void asignar_marco(pagina_t* * pagina, segmento_t** segmento, uint32_t pid){
	marco_t* marco=NULL;
	marco = buscar_marco_libre();

	//If there is no free frame, swap
	//If there is a free frame, assign it to the page
	if((*pagina)->en_disco){
		swap_out(pid, (*segmento)->id, pagina);
	}else{
		if(marco == NULL){
			swap_in(pagina, (*segmento)->id, pid);
		}else{
			(*pagina)->marco=marco->id;
			(*pagina)->tiene_marco= true;
			marco->id_proceso = pid;
			marco->ocupado= true;

			loggear_trace("Se asigno el marco %d a la pagina %d del segmento %d del proceso %d.",
					marco->id, (*pagina)->id, (*segmento)->id, pid);

			if(cantidad_marcos_libre() == 0){
				loggear_info("Espacio de memoria principal lleno");
			}
		}
	}
}
Example no. 6
bool 
spage_table_load (struct spage_table_entry *spte, enum spage_types type)
{
  // P3: reduce race condition with frame eviction process.
  spte->inevictable = true;

  if (spte->in_memory) return false;

  if (type == SWAP)
    {
      uint8_t *f = frame_alloc (PAL_USER, spte);
      if (!f) return false;

      if (!install_page (spte->upage, f, spte->writable))
	{
	  frame_free (f);
	  return false;
	}

      swap_in (spte->swap_slot_id, spte->upage);
      spte->in_memory = true;
    }
  
  if (type == FILE || type == MMAP)
    {
      enum palloc_flags fg = PAL_USER;
      if (spte->file_read_bytes == 0) fg |= PAL_ZERO;

      uint8_t *f = frame_alloc (fg, spte);
      if (!f) return false;

      if (spte->file_read_bytes > 0)
	{
	  lock_acquire (&lock_f);
	  if ((int) spte->file_read_bytes != file_read_at (spte->file, f, spte->file_read_bytes,
							   spte->file_offset))
	    {
	      lock_release (&lock_f);
	      frame_free (f);
	      return false;
	    }
	  lock_release (&lock_f);
	  memset (f + spte->file_read_bytes, 0, spte->file_zero_bytes);
	}
      
      if (!install_page (spte->upage, f, spte->writable))
	{
	  frame_free (f);
	  return false;
	}
      
      spte->in_memory = true;  
    }
  
  return true;
}
Example no. 7
/**
* unmap_virtual_page - remove a virtual page's mapping
* @phys_pd: physical page of the page directory to use
* @virt_page: the virtual page whose mapping is to be removed
**/
void unmap_virtual_page(uint_t phys_pd, uint_t virt_page)
{
	uint_t new_location;
	uint_t pde, pte;
	page_entry_t *pd, *pt;

	pde = virt_page / MEMORY_PE_COUNT;
	pte = virt_page % MEMORY_PE_COUNT;

	if (!phys_pd) {
		phys_pd = cur_phys_pd();
	}

	pd = temp_page_directory(phys_pd);
	if (pd[pde].pagenum == 0) {
		panic("unmap_virtual_page: (pd[pde].pagenum == 0)!");
	}
	if (!pd[pde].present) {
		new_location = swap_in(phys_pd, pd[pde].pagenum);
		if (!new_location) {
			panic("unmap_virtual_page (3)");
		}
		pd[pde].pagenum = new_location;
		pd[pde].present = 1;
	}

	pt = temp_page_directory(pd[pde].pagenum);
	if (pt[pte].pagenum == 0) {
		panic("unmap_virtual_page: (pt[pte].pagenum == 0)!");
	}
	if (!pt[pte].present) {
		new_location = swap_in(phys_pd, pt[pte].pagenum);
		if (!new_location) {
			panic("unmap_virtual_page (4)");
		}
		pt[pte].pagenum = new_location;
		pt[pte].present = 1;
	}
	free_phys_page(pt[pte].pagenum);
	pt[pte] = NULL_PE;
	flush_pagedir(phys_pd);
	return;
}
Example no. 8
/*===========================================================================*
 *				main					     *
 *===========================================================================*/
PUBLIC int main()
{
    /* Main routine of the process manager. */
    int result, s, proc_nr;
    struct mproc *rmp;
    sigset_t sigset;

    pm_init();			/* initialize process manager tables */

    /* This is PM's main loop-  get work and do it, forever and ever. */
    while (TRUE) {
        get_work();		/* wait for a PM system call */

        /* Check for system notifications first. Special cases. */
        if (call_nr == SYN_ALARM) {
            pm_expire_timers(m_in.NOTIFY_TIMESTAMP);
            result = SUSPEND;		/* don't reply */
        } else if (call_nr == SYS_SIG) {	/* signals pending */
            sigset = m_in.NOTIFY_ARG;
            if (sigismember(&sigset, SIGKSIG))  (void) ksig_pending();
            result = SUSPEND;		/* don't reply */
        }
        /* Else, if the system call number is valid, perform the call. */
        else if ((unsigned) call_nr >= NCALLS) {
            result = ENOSYS;
        } else {
            result = (*call_vec[call_nr])();
        }

        /* Send the results back to the user to indicate completion. */
        if (result != SUSPEND) setreply(who, result);

        swap_in();		/* maybe a process can be swapped in? */

        /* Send out all pending reply messages, including the answer to
         * the call just made above.  The processes must not be swapped out.
         */
        for (proc_nr=0, rmp=mproc; proc_nr < NR_PROCS; proc_nr++, rmp++) {
            /* In the meantime, the process may have been killed by a
             * signal (e.g. if a lethal pending signal was unblocked)
             * without the PM realizing it. If the slot is no longer in
             * use or just a zombie, don't try to reply.
             */
            if ((rmp->mp_flags & (REPLY | ONSWAP | IN_USE | ZOMBIE)) ==
                    (REPLY | IN_USE)) {
                if ((s=send(proc_nr, &rmp->mp_reply)) != OK) {
                    panic(__FILE__,"PM can't reply to", proc_nr);
                }
                rmp->mp_flags &= ~REPLY;
            }
        }
    }
    return(OK);
}
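The PM loop above (and the older MM loop in Example no. 16 below) dispatches each request through a call_vec table indexed by the call number. A stripped-down sketch of that table-driven dispatch, with made-up do_fork/do_exit handlers and an NCALLS bound standing in for the real tables:

#include <errno.h>
#include <stdio.h>

#define NCALLS 2

static int do_fork(void) { return 0; }   /* placeholder handlers */
static int do_exit(void) { return 0; }

/* One handler per system call number, as in MINIX's call_vec[]. */
static int (*call_vec[NCALLS])(void) = { do_fork, do_exit };

static int dispatch(unsigned call_nr)
{
    if (call_nr >= NCALLS)               /* invalid call number */
        return ENOSYS;
    return (*call_vec[call_nr])();
}

int main(void)
{
    printf("call 0 -> %d\n", dispatch(0));
    printf("call 7 -> %d\n", dispatch(7));   /* ENOSYS */
    return 0;
}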
Example no. 9
/*
 * page fault handler
 * - it deals with 4 scenarios:
 *   - expand user space stack
 *   - swap process in
 *   - do copy-on-write
 *   - handle segment fault
 */
void trap_memory_handler(struct user_context *user_ctx)
{
	unsigned long addr = (unsigned long)user_ctx->addr;
	unsigned int page_index = PAGE_UINDEX(addr);
	struct my_pte *ptep = current->page_table + page_index;
	int ret;

	_enter("pid = %u, at %p(%u), code = %d",
			current->pid, addr, page_index, user_ctx->code);

	switch (user_ctx->code) {
	case YALNIX_MAPERR:
		/* expand stack */
		if (!ptep->swap && page_index == current->stack_start - 1) {
			task_vm_expand_stack(current, 1);
			break;
		}

		/* swap process in */
		if (ptep->swap) {
			ret = swap_in(current);
			if (ret == EIO) {
				sys_tty_write(0, "Abort! Swap In error!\n",
						64, user_ctx);
				sys_exit(ret, user_ctx);
			}
		}

		break;
	case YALNIX_ACCERR:
		break;
	default:
#ifdef COW
		/* do copy-on-write for the parent and children */
		if (ptep->cow && ptep->prot == PROT_READ) {
			task_cow_copy_page(current, page_index);
			break;
		}
#endif
		/* if tried to write text segment just abort it */
		if (ptep->prot == (PROT_READ | PROT_EXEC)) {
			sys_tty_write(0, "Abort! Segment Fault!\n",
					64, user_ctx);
			sys_exit(ERROR, user_ctx);
		}

		break;
	}

	_leave();
	return;
}
Example no. 10
File: swap.c Project: xcw0579/mudOS
/*
 * Reload line number information from swap.
 */
void load_line_numbers(program_t *  prog)
{
    int size;

    if (prog->line_info)
	return;

    debug(d_flag, ("Unswap line numbers for /%s\n", prog->name));

    size = swap_in((char **) &prog->file_info, prog->line_swap_index);
    SET_TAG(prog->file_info, TAG_LINENUMBERS);
    prog->line_info = (unsigned char *)&prog->file_info[prog->file_info[1]];
    line_num_bytes_swapped -= size;
}
Example no. 11
/**
* temp_virt_page - temporary usable pointer into a page of another table
* @page: virtual page number (not a real page number, but internal to this function)
* @phys_pd: physical page on which the page table resides, or 0 for removal
* @virt_page: number of the virtual page to look up, or 0 for removal
**/
void * temp_virt_page(uint_t page, uint_t phys_pd, uint_t virt_page)
{
	page_entry_t *pd, *pt;
	uint_t new_location;

	if (!phys_pd || !virt_page) {
		return temp_phys_page(page, 0);
	}

	uint_t pde = virt_page / MEMORY_PE_COUNT;
	uint_t pte = virt_page % MEMORY_PE_COUNT;

	if (!(pd = temp_page_directory(phys_pd)) || !pd[pde].pagenum) {
		return 0;
	}
	if (!pd[pde].present) {
		new_location = swap_in(phys_pd, pd[pde].pagenum);
		if (!new_location) {
			return 0;
		}
		pd[pde].pagenum = new_location;
		pd[pde].present = 1;
	}
	if (!(pt = temp_page_table(pd[pde].pagenum)) || !pt[pte].pagenum) {
		return 0;
	}
	if (!pt[pte].present) {
		new_location = swap_in(phys_pd, pt[pte].pagenum);
		if (!new_location) {
			return 0;
		}
		pt[pte].pagenum = new_location;
		pt[pte].present = 1;
	}
	return temp_phys_page(page, pt[pte].pagenum);
}
Example no. 12
paddr_t page_fault(vaddr_t faultaddress,struct page_table_entry *pt_entry_temp2)
{
	struct page_table_entry *pt_entry = curthread->t_addrspace->page_table ;
	struct page_table_entry *pt_entry_temp = NULL;
	while(pt_entry != NULL){
		if(pt_entry->va == faultaddress) // Additional checks to be implemented later
		{
			if(pt_entry->state == PAGE_IN_MEMORY){
					pt_entry_temp2->state = 1000;
					struct coremap *local_coremap = coremap_list;
					while(local_coremap!=NULL){
						if(local_coremap->pa == pt_entry->pa){
							local_coremap->timestamp = localtime++;
							break;
						}
						local_coremap = local_coremap->next;
					}
					return pt_entry->pa ;
			}
			else{
				paddr_t p = swap_in(pt_entry->offset);
				pt_entry->state = PAGE_IN_MEMORY;
				pt_entry->pa = p;
				pt_entry_temp2->state = 1000;
				return p;
			}			
		}
		pt_entry_temp = pt_entry ;
		pt_entry = pt_entry->next ;
	}
	paddr_t paddr = user_page_alloc() ;
	pt_entry_temp2->va =  faultaddress;
	pt_entry_temp2->pa =  paddr ;
	pt_entry_temp2->state = PAGE_IN_MEMORY ; 
	pt_entry_temp2->offset = -1;
	pt_entry_temp2->next = NULL ;

	if (pt_entry_temp != NULL)
	{
		 pt_entry_temp->next = pt_entry_temp2 ;
	}
	else
	{
		curthread->t_addrspace->page_table = pt_entry_temp2 ;
	}

	return paddr ;
}
Example no. 13
void zmq::pipe_t::set_head (uint64_t position_)
{
    //  This may cause the next write to succeed.
    in_core_msg_cnt -= position_ - last_head_position;
    last_head_position = position_;

    //  Transfer messages from the data dam into the main memory.
    if (swapping && in_core_msg_cnt < (size_t) lwm)
        swap_in ();

    //  If there's a gap notification waiting, push it into the queue.
    if (delayed_gap && check_write ()) {
        raw_message_t msg;
        raw_message_init_notification (&msg, raw_message_t::gap_tag);
        write (&msg);
        delayed_gap = false;
    }
}
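The ZeroMQ fragment above swaps messages back into memory only once the in-core count drops below a low watermark (lwm). A tiny sketch of that high/low watermark hysteresis; the HWM/LWM constants and on_queue_size_change() are hypothetical, intended only to show why two thresholds avoid thrashing around a single limit:

#include <stdbool.h>
#include <stdio.h>

#define HWM 100   /* start swapping out above this many in-core messages */
#define LWM  50   /* start swapping back in below this many              */

static bool swapping;

/* Decide when to spill to disk and when it is safe to pull messages back. */
static void on_queue_size_change(int in_core_msg_cnt)
{
    if (!swapping && in_core_msg_cnt >= HWM)
        swapping = true;              /* overflow to disk from now on */
    else if (swapping && in_core_msg_cnt < LWM)
        swapping = false;             /* safe to swap_in() again      */
}

int main(void)
{
    for (int n = 0; n <= 120; n += 20) {
        on_queue_size_change(n);
        printf("in-core=%3d swapping=%d\n", n, swapping);
    }
    return 0;
}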
Example no. 14
void swap_out(uint32_t pid, uint16_t id_segmento, pagina_t* * pagina)
{
	// ---- Open the file ----
	uint16_t id_pagina = (*pagina)->id;
	//Converts each id to a string and then concatenates them in pairs
	char *nombre_archivo=generar_nombre_archivo_swap(pid, id_segmento, id_pagina);

	char* path;
	path=concat_string("en_disco/",nombre_archivo);
	path=concat_string(path,".txt");
	free(nombre_archivo);
	//creates a file and stores it in an internal folder.
	//the name is made up of the pid, segment id and page id
	FILE* arch = fopen(path,"r");

	// ---- Get a frame for the page ----
	marco_t* marco = buscar_marco_libre();

	if(marco==NULL){
		swap_in(pagina, id_segmento, pid);
	}else{
		(*pagina)->marco=marco->id;
	}

	//Now the page belongs to its frame
	(*pagina)->tiene_marco=true;
	(*pagina)->en_disco=false;

	//Now the frame belongs to its page
	marco = buscar_marco_segun_id((*pagina)->marco);
	marco->id_proceso=pid;
	(*pagina)->marco=marco->id;
	marco->ocupado=true;
	fread(marco->datos,sizeof(char),256,arch);

	// ---- Finish ----
	remove(path);
	free(path);

	disminuyo_cantidad_archivos_swap();

	loggear_trace("Va a memoria pagina %d del segmento %d del proceso %d en marco %d.",
			(*pagina)->id, id_segmento, marco->id_proceso, marco->id);
}
Example no. 15
File: swap.c Project: xcw0579/mudOS
void load_ob_from_swap(object_t *  ob)
{
    if (ob->swap_num == -1)
	fatal("Loading not swapped object.\n");

    debug(d_flag, ("Unswap object /%s (ref %d)", ob->name, ob->ref));

    swap_in((char **) &ob->prog, ob->swap_num);
    SET_TAG(ob->prog, TAG_PROGRAM);
    /*
     * to be relocated: program functions strings variable_names inherit
     * argument_types type_start
     */
    locate_in(ob->prog);	/* relocate the internal pointers */

    /* The reference count will already be 1 ! */
    ob->flags &= ~O_SWAPPED;
    num_swapped--;
    total_prog_block_size += ob->prog->total_size;
    total_num_prog_blocks += 1;
}
Example no. 16
/*===========================================================================*
 *				main					     *
 *===========================================================================*/
PUBLIC void main()
{
/* Main routine of the memory manager. */

  int result, proc_nr;
  struct mproc *rmp;

  mm_init();			/* initialize memory manager tables */

  /* This is MM's main loop-  get work and do it, forever and ever. */
  while (TRUE) {
	get_work();		/* wait for an MM system call */

	/* If the call number is valid, perform the call. */
	if ((unsigned) mm_call >= NCALLS) {
		result = ENOSYS;
	} else {
		result = (*call_vec[mm_call])();
	}

	/* Send the results back to the user to indicate completion. */
	if (result != E_NO_MESSAGE) setreply(who, result);

	swap_in();		/* maybe a process can be swapped in? */

	/* Send out all pending reply messages, including the answer to
	 * the call just made above.  The processes must not be swapped out.
	 */
	for (proc_nr = 0, rmp = mproc; proc_nr < NR_PROCS; proc_nr++, rmp++) {
		if ((rmp->mp_flags & (REPLY | ONSWAP)) == REPLY) {
			if (send(proc_nr, &rmp->mp_reply) != OK)
				panic("MM can't reply to", proc_nr);
			rmp->mp_flags &= ~REPLY;
		}
	}
  }
}
Example no. 17
static inline void do_swap_page(struct task_struct * tsk, 
	struct vm_area_struct * vma, unsigned long address,
	pte_t * page_table, pte_t entry, int write_access)
{
	pte_t page;

	if (!vma->vm_ops || !vma->vm_ops->swapin) {
		swap_in(tsk, vma, page_table, pte_val(entry), write_access);
		flush_page_to_ram(pte_page(*page_table));
		return;
	}
	page = vma->vm_ops->swapin(vma, address - vma->vm_start + vma->vm_offset, pte_val(entry));
	if (pte_val(*page_table) != pte_val(entry)) {
		free_page(pte_page(page));
		return;
	}
	if (mem_map[MAP_NR(pte_page(page))].count > 1 && !(vma->vm_flags & VM_SHARED))
		page = pte_wrprotect(page);
	++vma->vm_mm->rss;
	++tsk->maj_flt;
	flush_page_to_ram(pte_page(page));
	set_pte(page_table, page);
	return;
}
Example no. 18
File: vm.c Project: gapry/AOS
void
sos_VMFaultHandler(seL4_CPtr reply_cap, seL4_Word fault_addr, seL4_Word fsr, bool is_code){
    dprintf(3, "sos vmfault handler \n");

    int err;

    dprintf(3, "sos vmfault handler, getting as \n");
    addrspace_t *as = proc_getas();
    dprintf(3, "sos vmfault handler, gotten as \n");
    if (as == NULL) {
        dprintf(3, "app as is NULL\n");
        /* Kernel probably failed when bootstrapping */
        set_cur_proc(PROC_NULL);
        cspace_free_slot(cur_cspace, reply_cap);
        return;
    }

    region_t *reg;

    /* Is this a segfault? */
    if (_check_segfault(as, fault_addr, fsr, &reg)) {
        dprintf(3, "vmf: segfault\n");
        proc_destroy(proc_get_id());
        set_cur_proc(PROC_NULL);
        cspace_free_slot(cur_cspace, reply_cap);
        return;
    }

    /*
     * If we get here, this must be a valid address. Therefore, this page is
     * either swapped out or has never been mapped in.
     */

    VMF_cont_t *cont = malloc(sizeof(VMF_cont_t));
    if (cont == NULL) {
        dprintf(3, "vmfault out of mem\n");
        /* We cannot handle the fault but the process still can run
         * There will be more faults coming though */
        set_cur_proc(PROC_NULL);
        seL4_MessageInfo_t reply = seL4_MessageInfo_new(0, 0, 0, 0);
        seL4_Send(reply_cap, reply);
        cspace_free_slot(cur_cspace, reply_cap);
        return;
    }

    cont->reply_cap = reply_cap;
    cont->as        = as;
    cont->vaddr     = fault_addr;
    cont->is_code   = is_code;
    cont->reg       = reg;
    cont->pid       = proc_get_id();

    /* Check if this page is a new, unmapped page or if it is just swapped out */
    if (sos_page_is_inuse(as, fault_addr)) {
        if (sos_page_is_swapped(as, fault_addr)) {
            dprintf(3, "vmf tries to swapin\n");
            /* This page is swapped out, we need to swap it back in */
            err = swap_in(cont->as, cont->reg->rights, cont->vaddr,
                      cont->is_code, _sos_VMFaultHandler_reply, cont);
            if (err) {
                _sos_VMFaultHandler_reply((void*)cont, err);
                return;
            }
            return;
        } else {
            if (sos_page_is_locked(as, fault_addr)) {
                dprintf(3, "vmf page is locked\n");
                _sos_VMFaultHandler_reply((void*)cont, EFAULT);
                return;
            }

            dprintf(3, "vmf second chance mapping page back in\n");
            err = _set_page_reference(cont->as, cont->vaddr, cont->reg->rights);
            _sos_VMFaultHandler_reply((void*)cont, err);
            return;
        }
    } else {
        /* This page has never been mapped, so do that and return */
        dprintf(3, "vmf tries to map a page\n");
        inc_proc_size_proc(cur_proc());
        err = sos_page_map(proc_get_id(), as, fault_addr, reg->rights, _sos_VMFaultHandler_reply, (void*)cont, false);
        if(err){
            dec_proc_size_proc(cur_proc());
            _sos_VMFaultHandler_reply((void*)cont, err);
        }
        return;
    }
    /* Otherwise, this is not handled */
    dprintf(3, "vmf error at the end\n");
    _sos_VMFaultHandler_reply((void*)cont, EFAULT);
    return;
}
Example no. 19
/* do_pgfault - interrupt handler to process the page fault exception
 * @mm         : the control struct for a set of vma using the same PDT
 * @error_code : the error code recorded in trapframe->tf_err which is set by x86 hardware
 * @addr       : the addr which causes a memory access exception, (the contents of the CR2 register)
 *
 * CALL GRAPH: trap--> trap_dispatch-->pgfault_handler-->do_pgfault
 * The processor provides ucore's do_pgfault function with two items of information to aid in diagnosing
 * the exception and recovering from it.
 *   (1) The contents of the CR2 register. The processor loads the CR2 register with the
 *       32-bit linear address that generated the exception. The do_pgfault fun can
 *       use this address to locate the corresponding page directory and page-table
 *       entries.
 *   (2) An error code on the kernel stack. The error code for a page fault has a format different from
 *       that for other exceptions. The error code tells the exception handler three things:
 *         -- The P flag   (bit 0) indicates whether the exception was due to a not-present page (0)
 *            or to either an access rights violation or the use of a reserved bit (1).
 *         -- The W/R flag (bit 1) indicates whether the memory access that caused the exception
 *            was a read (0) or write (1).
 *         -- The U/S flag (bit 2) indicates whether the processor was executing at user mode (1)
 *            or supervisor mode (0) at the time of the exception.
 */
int
do_pgfault(struct mm_struct *mm, uint32_t error_code, uintptr_t addr) {
    int ret = -E_INVAL;
    //try to find a vma which include addr
    struct vma_struct *vma = find_vma(mm, addr);

    pgfault_num++;
    //If the addr is in the range of a mm's vma?
    if (vma == NULL || vma->vm_start > addr) {
        cprintf("not valid addr %x, and  can not find it in vma\n", addr);
        goto failed;
    }
    //check the error_code
    switch (error_code & 3) {
    default:
            /* error code flag : default is 3 ( W/R=1, P=1): write, present */
    case 2: /* error code flag : (W/R=1, P=0): write, not present */
        if (!(vma->vm_flags & VM_WRITE)) {
            cprintf("do_pgfault failed: error code flag = write AND not present, but the addr's vma cannot write\n");
            goto failed;
        }
        break;
    case 1: /* error code flag : (W/R=0, P=1): read, present */
        cprintf("do_pgfault failed: error code flag = read AND present\n");
        goto failed;
    case 0: /* error code flag : (W/R=0, P=0): read, not present */
        if (!(vma->vm_flags & (VM_READ | VM_EXEC))) {
            cprintf("do_pgfault failed: error code flag = read AND not present, but the addr's vma cannot read or exec\n");
            goto failed;
        }
    }
    /* IF (write an existed addr ) OR
     *    (write an non_existed addr && addr is writable) OR
     *    (read  an non_existed addr && addr is readable)
     * THEN
     *    continue process
     */
    uint32_t perm = PTE_U;
    if (vma->vm_flags & VM_WRITE) {
        perm |= PTE_W;
    }
    addr = ROUNDDOWN(addr, PGSIZE);

    ret = -E_NO_MEM;

    pte_t *ptep=NULL;
    /*LAB3 EXERCISE 1: 2012012139
    * Maybe you want help comment, BELOW comments can help you finish the code
    *
    * Some Useful MACROs and DEFINEs, you can use them in below implementation.
    * MACROs or Functions:
    *   get_pte : get a pte and return the kernel virtual address of this pte for la;
    *             if the PT containing this pte doesn't exist, alloc a page for the PT (notice the 3rd parameter '1')
    *   pgdir_alloc_page : call alloc_page & page_insert functions to allocate a page size memory & setup
    *             an addr map pa<--->la with linear address la and the PDT pgdir
    * DEFINES:
    *   VM_WRITE  : If vma->vm_flags & VM_WRITE == 1/0, then the vma is writable/non writable
    *   PTE_W           0x002                   // page table/directory entry flags bit : Writeable
    *   PTE_U           0x004                   // page table/directory entry flags bit : User can access
    * VARIABLES:
    *   mm->pgdir : the PDT of these vma
    *
    */
#if 0
    /*LAB3 EXERCISE 1: 2012012139*/
    ptep = ???              //(1) try to find a pte; if the pte's PT (Page Table) doesn't exist, then create a PT.
    if (*ptep == 0) {
                            //(2) if the phy addr doesn't exist, then alloc a page & map the phy addr to the logical addr

    }
    else {
    /*LAB3 EXERCISE 2: 2012012139
    * Now we treat this pte as a swap entry: load the data from disk into a page with a phy addr,
    * map the phy addr to the logical addr, and trigger the swap manager to record the access situation of this page.
    *
    *  Some useful MACROs and DEFINEs; you can use them in the implementation below.
    *  MACROs or Functions:
    *    swap_in(mm, addr, &page) : alloc a memory page, then, according to the swap entry in the PTE for addr,
    *                               find the addr of the disk page and read its content into this memory page
    *    page_insert : build the map of the phy addr of a Page with the linear addr la
    *    swap_map_swappable : set the page swappable
    */
    /*
     * LAB5 CHALLENGE (the implementation of Copy on Write)
		There are 2 situations when the code comes here.
		  1) *ptep & PTE_P == 1: a process tried to write a read-only page.
		     If the vma that includes this addr is writable, we can make the page writable by rewriting *ptep.
		     This method can be used to implement Copy on Write (COW), a fast fork technique.
		  2) *ptep & PTE_P == 0 but *ptep != 0: this pte is a swap entry.
		     We should add LAB3's results here.
     */
        if(swap_init_ok) {
            struct Page *page=NULL;
                                    //(1)According to the mm AND addr, try to load the content of right disk page
                                    //    into the memory which page managed.
                                    //(2) According to the mm, addr AND page, setup the map of phy addr <---> logical addr
                                    //(3) make the page swappable.
                                    //(4) [NOTICE]: you may need to update your lab3's implementation for LAB5's normal execution.
        }
        else {
            cprintf("no swap_init_ok but ptep is %x, failed\n",*ptep);
            goto failed;
        }
   }
#endif
    // try to find a pte; if the pte's PT (Page Table) doesn't exist, then create a PT.
    // (notice the 3rd parameter '1')
    if ((ptep = get_pte(mm->pgdir, addr, 1)) == NULL) {
        cprintf("get_pte in do_pgfault failed\n");
        goto failed;
    }

    if (*ptep == 0) { // if the phy addr doesn't exist, then alloc a page & map the phy addr to the logical addr
        if (pgdir_alloc_page(mm->pgdir, addr, perm) == NULL) {
            cprintf("pgdir_alloc_page in do_pgfault failed\n");
            goto failed;
        }
    }
    else {
        struct Page *page=NULL;
        cprintf("do pgfault: ptep %x, pte %x\n",ptep, *ptep);
        if (*ptep & PTE_P) {
            //if a process writes to this existing read-only page (PTE_P means present), we end up here.
            //we can implement the delayed memory-space copy for a forked child process (AKA copy on write, COW).
            //we don't implement it yet; we will do it in the future.
            panic("error write a non-writable pte");
            //page = pte2page(*ptep);
        } else {
           // if this pte is a swap entry, then load data from disk to a page with phy addr
           // and call page_insert to map the phy addr with logical addr
           if(swap_init_ok) {
               if ((ret = swap_in(mm, addr, &page)) != 0) {
                   cprintf("swap_in in do_pgfault failed\n");
                   goto failed;
               }
           }
           else {
               cprintf("no swap_init_ok but ptep is %x, failed\n",*ptep);
               goto failed;
           }
       }
       page_insert(mm->pgdir, page, addr, perm);
       swap_map_swappable(mm, addr, page, 1);
       page->pra_vaddr = addr;
   }
   ret = 0;
failed:
    return ret;
}
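The long comment at the top of Example no. 19 spells out the x86 page-fault error-code bits (P, W/R, U/S). Below is a standalone decoder for those bits, handy when reading the switch (error_code & 3) in Examples no. 19 and no. 22; the PF_* names are assumptions, not ucore definitions:

#include <stdint.h>
#include <stdio.h>

/* x86 page-fault error code bits described in the comment above. */
#define PF_P  0x1   /* 0 = not-present page, 1 = protection violation */
#define PF_WR 0x2   /* 0 = read access,      1 = write access         */
#define PF_US 0x4   /* 0 = supervisor mode,  1 = user mode            */

static void decode_pf_error(uint32_t error_code)
{
    printf("%s access in %s mode: %s\n",
           (error_code & PF_WR) ? "write" : "read",
           (error_code & PF_US) ? "user" : "supervisor",
           (error_code & PF_P)  ? "protection violation" : "page not present");
}

int main(void)
{
    decode_pf_error(0);  /* read, not present, supervisor  */
    decode_pf_error(2);  /* write, not present, supervisor */
    decode_pf_error(7);  /* write, protection fault, user  */
    return 0;
}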
Example no. 20
int handle_user_pagefault(void)
{
    phys_pagedir = active_process->mem.phys_pd;

    cr2 = asm_get_cr2();
    eip = (void*) kernel_tasks.tss_for_active_thread.eip; /* read eip before it is used below */

    dpage = ADDR_TO_PAGE(cr2);
    doffset = eip - (char*) PAGE_TO_ADDR(dpage);
    dpde = dpage / MEMORY_PE_COUNT;
    dpte = dpage % MEMORY_PE_COUNT;

    cpage = ADDR_TO_PAGE(eip);
    coffset = eip - (char*) PAGE_TO_ADDR(cpage);
    cpde = cpage / MEMORY_PE_COUNT;
    cpte = cpage % MEMORY_PE_COUNT;

    const char *panic_msg;
    uint_t new_location;

    page_entry_t *pd, *pt;

    if (!(pd = temp_page_directory(phys_pagedir))) {
        goto no_pd_got;
    }
    if (!pd[dpde].pagenum) {
        goto no_pt_page;
    }
    if ((dpde >= KMEM_PDE_END) && !pd[dpde].user) {
        printf("User process = %d, thread = %d\n", active_pid, active_tid);
        printf("Trying to access address %p (page %d).\n", asm_get_cr2(), ADDR_TO_PAGE(asm_get_cr2()));
        printf("(!pd[dpde].user)\n");
        panic("Bug in memory handling!");
    }
    if (!pd[dpde].present) {
        new_location = swap_in(phys_pagedir, pd[dpde].pagenum);
        if (!new_location) {
            goto no_pt_page_swapped;
        }
        pd[dpde].pagenum = new_location;
        pd[dpde].present = 1;
    }
    if (!(pt = temp_page_table(pd[dpde].pagenum))) {
        goto no_pt_got;
    }
    if (dpde && dpte && !pt[dpte].pagenum) {
        goto no_cr2_page;
    }
    if (!pt[dpte].user) {
        return user_tries_kernel();
    }
    if (dpde < KMEM_PDE_END) {
        printf("User process = %d, thread = %d\n", active_pid, active_tid);
        printf("Trying to access address %p (page %d).\n", asm_get_cr2(), ADDR_TO_PAGE(asm_get_cr2()));
        printf("(dpde < KMEM_PDE_END) && pt[dpte].user)\n");
        panic("Bug in memory handling!");
        return user_tries_kernel();
    }
    if (!pt[dpte].present) {
        new_location = swap_in(phys_pagedir, pt[dpte].pagenum);
        if (!new_location) {
            goto no_cr2_page_swapped;
        }
        pt[dpte].pagenum = new_location;
        pt[dpte].present = 1;
    }

    return 0; // Resolved. :)

no_pd_got:
    panic_msg = "Page fault: failed getting PD from RAM!";
    goto fail;
no_pt_page:
    panic_msg = "Page fault; page missing from PD!";
    goto fail;
no_pt_page_swapped:
    panic_msg = "Page fault; failed swapping PT to RAM!";
    goto fail;
no_pt_got:
    panic_msg = "Page fault; failed getting PT from RAM!";
    goto fail;
no_cr2_page:
    panic_msg = "Page fault; page missing from PT!";
    goto fail;
no_cr2_page_swapped:
    panic_msg = "Page fault; failed swapping page to RAM!";
    goto fail;

fail:
    printf("Page Fault!\nThread %i, process %i\n", active_tid, active_pid);
    printf("Trying to access address %p (page %d).\n", cr2, dpage);
    printf("%s\n", panic_msg);
    return -1;
}
Example no. 21
int
as_copy(struct addrspace *old, struct addrspace **ret)
{
    struct addrspace *newas;

    newas = as_create();
    if (newas==NULL) {
        return ENOMEM;
    }

    //	kprintf(" **** inside as copy ****  \n");
    //	spinlock_acquire(newas->as_splock);
    //	spinlock_acquire(old->as_splock);

    if(use_small_lock == true && swapping_started == true)
    {
        lock_acquire(newas->as_lock);
        lock_acquire(old->as_lock);
    }
    else if(use_big_lock == true && swapping_started == true)
        lock_acquire(as_lock);
    struct as_region* r_old = old->as_region_list;
    while(r_old != NULL)
    {
        struct as_region* r_new = (struct as_region*)kmalloc(sizeof(struct as_region));
        if(r_new == NULL)
        {
            if(use_big_lock == true && swapping_started == true)
                lock_release(as_lock);
            else if(use_small_lock == true && swapping_started == true)
            {
                lock_release(old->as_lock);
                lock_release(newas->as_lock);
            }
            //spinlock_release(old->as_splock);
            //spinlock_release(newas->as_splock);
            as_destroy(newas);
            return ENOMEM;
        }

        r_new->region_base = r_old->region_base;
        r_new->region_npages = r_old->region_npages;
        r_new->can_read = r_old->can_read;
        r_new->can_write = r_old->can_write;
        r_new->can_execute = r_old->can_execute;

        int ret = region_list_add_node(&newas->as_region_list,r_new);
        if(ret == -1)
        {
            if(use_big_lock == true && swapping_started == true)
                lock_release(as_lock);
            else if(use_small_lock == true  && swapping_started == true)
            {
                lock_release(old->as_lock);
                lock_release(newas->as_lock);
            }
            //	spinlock_release(old->as_splock);
            //	spinlock_release(newas->as_splock);
            as_destroy(newas);
            return ENOMEM;
        }
        r_old = r_old->next;
    }

    struct page_table_entry* p_old = old->as_page_list;
    while(p_old != NULL)
    {
        struct page_table_entry* p_new = (struct page_table_entry*)kmalloc(sizeof(struct page_table_entry));
        if(p_new == NULL)
        {
            if(use_big_lock == true && swapping_started == true)
                lock_release(as_lock);
            else if(use_small_lock == true && swapping_started == true)
            {
                lock_release(old->as_lock);
                lock_release(newas->as_lock);
            }
            //		spinlock_release(old->as_splock);
            //		spinlock_release(newas->as_splock);
            as_destroy(newas);

            return ENOMEM;
        }
        p_new->vaddr = p_old->vaddr;
        p_new->swap_pos = -1;

        KASSERT(p_old->page_state != SWAPPING);
        while(p_old->page_state == SWAPPING)
        {

            thread_yield();

        }

        //	if(!spinlock_do_i_hold)
        //	KASSERT(p_old->page_state != SWAPPING);

        if(p_old->page_state == MAPPED)
        {
            if(use_page_lock == true && swapping_started == true)
                lock_acquire(coremap[(p_old->paddr)/PAGE_SIZE].page_lock);

            if(p_old->page_state == MAPPED)
            {
                paddr_t paddr = get_user_page(p_old->vaddr, false, newas);
                KASSERT(p_old->page_state == MAPPED);
                //	int spl = splhigh();
                if(use_small_lock == true && swapping_started == true)
                {
                    if(lock_do_i_hold(newas->as_lock) == false)
                        lock_acquire(newas->as_lock);
                    if(lock_do_i_hold(old->as_lock) == false)
                        lock_acquire(newas->as_lock);
                }
                else if(use_big_lock == true && swapping_started == true)
                {
                    if(lock_do_i_hold(as_lock) == false)
                        lock_acquire(as_lock);
                }
                if(paddr == 0)
                {
                    if(use_big_lock == true && swapping_started == true)
                        lock_release(as_lock);
                    else if(use_small_lock == true && swapping_started == true)
                    {
                        lock_release(old->as_lock);
                        lock_release(newas->as_lock);
                    }
                    //				spinlock_release(old->as_splock);
                    //				spinlock_release(newas->as_splock);
                    as_destroy(newas);
                    return ENOMEM;
                }
                uint32_t old_index = p_old->paddr/PAGE_SIZE;
                KASSERT(coremap[old_index].is_victim == false);
                KASSERT(coremap[paddr/PAGE_SIZE].is_victim == false);
                memmove((void*)PADDR_TO_KVADDR(paddr),
                        (const void *)PADDR_TO_KVADDR(p_old->paddr), //use this? or PADDR_TO_KVADDR like dumbvm does?. But why does dumbvm do that in the first place.
                        PAGE_SIZE);									// i know why, cannot call functions on user memory addresses. So convert it into a kv address.
                // the function will translate it into a physical address again and free it. ugly Hack. but no other way.

                p_new->paddr = paddr;
                p_new->page_state = MAPPED;

                //	splx(spl);

                int ret = page_list_add_node(&newas->as_page_list,p_new);
                if(ret == -1)
                {
                    if(use_big_lock == true && swapping_started == true)
                        lock_release(as_lock);
                    else if(use_small_lock == true && swapping_started == true)
                    {
                        lock_release(old->as_lock);
                        lock_release(newas->as_lock);
                    }
                    //			spinlock_release(old->as_splock);
                    //			spinlock_release(newas->as_splock);
                    as_destroy(newas);
                    return ENOMEM;
                }

                if(use_page_lock == true && swapping_started == true)
                {

                    if(lock_do_i_hold(coremap[paddr/PAGE_SIZE].page_lock) == true)
                        lock_release(coremap[paddr/PAGE_SIZE].page_lock);


                    if(lock_do_i_hold(coremap[(p_old->paddr/PAGE_SIZE)].page_lock) == true)
                        lock_release(coremap[(p_old->paddr/PAGE_SIZE)].page_lock);
                }

            }
        }

        if(p_old->page_state == SWAPPED)
        {
            // this page is in disk, so we need to create a copy of that page somewhere in disk and then update the page table entry of the new process.
            // going with the disk->memory->disk approach suggested in a recitation video by jinghao shi.
            // Allocate a buffer at vm_bootstrap of size 4k (1 page). Use this buffer to temporarily copy data from disk to here and then to disk again
            // then clear the buffer. This buffer is a shared resource, so we need a lock around it.

            //	kprintf("in as_copy swap code \n");
            //	spinlock_release(old->as_splock);
            //	spinlock_release(newas->as_splock);
            swap_in(p_old->vaddr,old,copy_buffer_vaddr, p_old->swap_pos);
            //	kprintf("completed swap in \n");
            int pos = mark_swap_pos(p_new->vaddr, newas);
            KASSERT(pos != -1);
            int err = write_to_disk(KVADDR_TO_PADDR(copy_buffer_vaddr)/PAGE_SIZE, pos);
            //	kprintf("completed writing to disk \n");
            KASSERT(err == 0);
            //		spinlock_acquire(newas->as_splock);
            //		spinlock_acquire(old->as_splock);
            //	as_zero_region(KVADDR_TO_PADDR(copy_buffer_vaddr),1);
            p_new->page_state = SWAPPED;
            p_new->swap_pos = pos;
            p_new->paddr = 0;


            if(use_page_lock == true && swapping_started == true)
            {

                if(lock_do_i_hold(coremap[(p_old->paddr/PAGE_SIZE)].page_lock) == true)
                    lock_release(coremap[(p_old->paddr/PAGE_SIZE)].page_lock);
            }
        }
        p_old = p_old->next;

    }

    newas->as_heap_start = old->as_heap_start;
    newas->as_heap_end = old->as_heap_end;
    *ret = newas;


    if(use_big_lock == true && swapping_started == true)
        lock_release(as_lock);
    else if(use_small_lock == true && swapping_started == true)
    {
        lock_release(old->as_lock);
        lock_release(newas->as_lock);
    }

//	kprintf("exiting as copy \n");
    //	spinlock_release(old->as_splock);
    //	spinlock_release(newas->as_splock);
    return 0;
}
Example no. 22
/* do_pgfault - interrupt handler to process the page fault exception
 * @mm         : the control struct for a set of vma using the same PDT
 * @error_code : the error code recorded in trapframe->tf_err which is set by x86 hardware
 * @addr       : the addr which causes a memory access exception, (the contents of the CR2 register)
 *
 * CALL GRAPH: trap--> trap_dispatch-->pgfault_handler-->do_pgfault
 * The processor provides ucore's do_pgfault function with two items of information to aid in diagnosing
 * the exception and recovering from it.
 *   (1) The contents of the CR2 register. The processor loads the CR2 register with the
 *       32-bit linear address that generated the exception. The do_pgfault fun can
 *       use this address to locate the corresponding page directory and page-table
 *       entries.
 *   (2) An error code on the kernel stack. The error code for a page fault has a format different from
 *       that for other exceptions. The error code tells the exception handler three things:
 *         -- The P flag   (bit 0) indicates whether the exception was due to a not-present page (0)
 *            or to either an access rights violation or the use of a reserved bit (1).
 *         -- The W/R flag (bit 1) indicates whether the memory access that caused the exception
 *            was a read (0) or write (1).
 *         -- The U/S flag (bit 2) indicates whether the processor was executing at user mode (1)
 *            or supervisor mode (0) at the time of the exception.
 */
int
do_pgfault(struct mm_struct *mm, uint32_t error_code, uintptr_t addr) {
    int ret = -E_INVAL;
    //try to find a vma which include addr
    struct vma_struct *vma = find_vma(mm, addr);

    pgfault_num++;
    //If the addr is in the range of a mm's vma?
    if (vma == NULL || vma->vm_start > addr) {
        cprintf("not valid addr %x, and  can not find it in vma\n", addr);
        goto failed;
    }
    //check the error_code
    switch (error_code & 3) {
    default:
            /* error code flag : default is 3 ( W/R=1, P=1): write, present */
    case 2: /* error code flag : (W/R=1, P=0): write, not present */
        if (!(vma->vm_flags & VM_WRITE)) {
            cprintf("do_pgfault failed: error code flag = write AND not present, but the addr's vma cannot write\n");
            goto failed;
        }
        break;
    case 1: /* error code flag : (W/R=0, P=1): read, present */
        cprintf("do_pgfault failed: error code flag = read AND present\n");
        goto failed;
    case 0: /* error code flag : (W/R=0, P=0): read, not present */
        if (!(vma->vm_flags & (VM_READ | VM_EXEC))) {
            cprintf("do_pgfault failed: error code flag = read AND not present, but the addr's vma cannot read or exec\n");
            goto failed;
        }
    }
    /* IF (write an existed addr ) OR
     *    (write an non_existed addr && addr is writable) OR
     *    (read  an non_existed addr && addr is readable)
     * THEN
     *    continue process
     */
    
    uint32_t perm = PTE_U;
    if (vma->vm_flags & VM_WRITE) {
        perm |= PTE_W;
    }
    addr = ROUNDDOWN(addr, PGSIZE);

    ret = -E_NO_MEM;

    pte_t *ptep=NULL;
    /*LAB3 EXERCISE 1: YOUR CODE
    * Maybe you want help comment, BELOW comments can help you finish the code
    *
    * Some Useful MACROs and DEFINEs, you can use them in below implementation.
    * MACROs or Functions:
    *   get_pte : get a pte and return the kernel virtual address of this pte for la;
    *             if the PT containing this pte doesn't exist, alloc a page for the PT (notice the 3rd parameter '1')
    *   pgdir_alloc_page : call alloc_page & page_insert functions to allocate a page size memory & setup
    *             an addr map pa<--->la with linear address la and the PDT pgdir
    * DEFINES:
    *   VM_WRITE  : If vma->vm_flags & VM_WRITE == 1/0, then the vma is writable/non writable
    *   PTE_W           0x002                   // page table/directory entry flags bit : Writeable
    *   PTE_U           0x004                   // page table/directory entry flags bit : User can access
    * VARIABLES:
    *   mm->pgdir : the PDT of these vma
    *
    */
//#if 0
    /*LAB3 EXERCISE 1: YOUR CODE*/
    ptep = get_pte(mm->pgdir, addr, 1);              //(1) try to find a pte; if the pte's PT (Page Table) doesn't exist, then create a PT.
    if (ptep == NULL) {
        cprintf("get_pte failed in do_pgfault \n");
        goto failed;
    }
    //(2) if the phy addr doesn't exist, then alloc a page & map the phy addr to the logical addr
    if (*ptep == 0) {
        if (pgdir_alloc_page(mm->pgdir, addr, perm) == NULL) {
            cprintf("pgdir_alloc_page failed in do_pgfault \n");
            goto failed;
        }
    }
    else {
    /*LAB3 EXERCISE 2: YOUR CODE
    * Now we treat this pte as a swap entry: load the data from disk into a page with a phy addr,
    * map the phy addr to the logical addr, and trigger the swap manager to record the access situation of this page.
    *
    *  Some useful MACROs and DEFINEs; you can use them in the implementation below.
    *  MACROs or Functions:
    *    swap_in(mm, addr, &page) : alloc a memory page, then, according to the swap entry in the PTE for addr,
    *                               find the addr of the disk page and read its content into this memory page
    *    page_insert : build the map of the phy addr of a Page with the linear addr la
    *    swap_map_swappable : set the page swappable
    */
        if(swap_init_ok) {
            struct Page *page=NULL;
                                    //(1)According to the mm AND addr, try to load the content of right disk page
                                    //    into the memory which page managed.
            int r;
            r = swap_in(mm, addr, &page);
            if (r != 0) {
                cprintf("swap_in failed in do_pgfault \n");
                goto failed;
            }
                                    //(2) According to the mm, addr AND page, setup the map of phy addr <---> logical addr
            page_insert(mm->pgdir, page, addr, perm);
                                    //(3) make the page swappable.
            swap_map_swappable(mm, addr, page, 1);
            page->pra_vaddr = addr;
        }
        else {
            cprintf("no swap_init_ok but ptep is %x, failed\n",*ptep);
            goto failed;
        }
   }
//#endif
   ret = 0;
failed:
    return ret;
}
Example no. 23
File: vmm.c Project: korepwx/pcore
/* do_pgfault - interrupt handler to process the page fault exception
 * @mm         : the control struct for a set of vma using the same PDT
 * @error_code : the error code recorded in trapframe->tf_err which is set by x86 hardware
 * @addr       : the addr which causes a memory access exception, (the contents of the CR2 register)
 *
 * CALL GRAPH: trap--> trap_dispatch-->pgfault_handler-->do_pgfault
 * The processor provides ucore's do_pgfault function with two items of information to aid in diagnosing
 * the exception and recovering from it.
 *   (1) The contents of the CR2 register. The processor loads the CR2 register with the
 *       32-bit linear address that generated the exception. The do_pgfault fun can
 *       use this address to locate the corresponding page directory and page-table
 *       entries.
 *   (2) An error code on the kernel stack. The error code for a page fault has a format different from
 *       that for other exceptions. The error code tells the exception handler three things:
 *         -- The P flag   (bit 0) indicates whether the exception was due to a not-present page (0)
 *            or to either an access rights violation or the use of a reserved bit (1).
 *         -- The W/R flag (bit 1) indicates whether the memory access that caused the exception
 *            was a read (0) or write (1).
 *         -- The U/S flag (bit 2) indicates whether the processor was executing at user mode (1)
 *            or supervisor mode (0) at the time of the exception.
 */
int vm_pgfault_handler(ProcVM *mm, uint32_t error_code, uintptr_t addr)
{
  int ret = -EINVAL;
  // Try to find a vma which include addr.
  ProcVMA *vma = vm_find_vma(mm, addr);
  ++vm_pgfault_count;
  
  //If the addr is in the range of a mm's vma?
  if (vma == NULL || vma->vm_start > addr) {
    printf("[vmm] pgfault_handler: not valid vma 0x%08x\n", addr);
    goto failed;
  }
  
  //check the error_code
  switch (error_code & 3) {
    /* error code flag : default is 3 (W/R=1, P=1): write, present */
    default:
    /* error code flag : (W/R=1, P=0): write, not present */
    case 2:
      if (!(vma->vm_flags & VM_WRITE)) {
        printf("[vmm] pgfault_handler: error code flag = write AND not present,"
               " but the addr's vma cannot write\n");
        goto failed;
      }
      break;
    /* error code flag : (W/R=0, P=1): read, present */
    case 1:
      printf("[vmm] pgfault_handler: error code flag = read AND present.\n");
      goto failed;
    /* error code flag : (W/R=0, P=0): read, not present */
    case 0:
      if (!(vma->vm_flags & (VM_READ | VM_EXEC))) {
        printf("[vmm] pgfault_handler: error code flag = read AND not present,"
               " but the addr's vma cannot read or exec.\n");
        goto failed;
      }
      break;
  }
  
  /* IF (write an existed addr ) OR
   *    (write an non_existed addr && addr is writable) OR
   *    (read  an non_existed addr && addr is readable)
   * THEN
   *    continue process
   */
  uint32_t perm = PTE_U;
  if (vma->vm_flags & VM_WRITE) {
    perm |= PTE_W;
  }
  addr = K_ROUND_DOWN(addr, PGSIZE); // Round to page margin.
  ret = -ENOMEM;
  
  // mm should be associated with particular process, so mm->pgdir here.
  pte_t *ptep = NULL;
  ptep = get_pte(mm->pgdir, addr, 1);
  if (ptep == NULL) {
    printf("[vmm] Cannot create page table entry for address 0x%08x\n", addr);
    goto failed;
  }
  
  // Page table entry does not exist, which indicates that this page was never
  // created. So just create a new one.
  Page* page;

  if (*ptep == 0) {
    if ((page = pgdir_alloc_page(mm->pgdir, addr, perm)) == NULL) {
      printf("[vmm] Cannot create page for address 0x%08x\n", addr);
      goto failed;
    }
  }
  
  // Otherwise, the page was created earlier, but is in swap at the moment.
  // So we need to load its data from disk into memory.
  else {
    if ((*ptep & PTE_P) == 0) {
      panic("[vmm] pgfault_handler: Page seems to be on swap, while swap is "
            "not enabled.");
#if 0
      // Check whether swap has been inited.
      if (!swap_init_ok) {
          cprintf("Swap is never initialized but a page [pte=%08x] on swap is requested.\n", *ptep);
          goto failed;
      }
      
      // Swap in the required page.
      if (swap_in(mm, addr, &page) != 0) {
          cprintf("Cannot swap in page 0x%08x.\n", *ptep);
          goto failed;
      }
      page_insert(mm->pgdir, page, addr, perm);
      
      // Register the new page to vmm manager.
      swap_map_swappable(mm, addr, page, 1);
#endif 
    }
  }
  
  ret = 0;
  
failed:
  return ret;
}