bool spage_table_load (struct spage_table_entry *spte, enum spage_types type) { // P3: reduce race condition with frame eviction process. spte->inevictable = true; if (spte->in_memory) return false; if (type == SWAP) { uint8_t *f = frame_alloc (PAL_USER, spte); if (!f) return false; if (!install_page (spte->upage, f, spte->writable)) { frame_free (f); return false; } swap_in (spte->swap_slot_id, spte->upage); spte->in_memory = true; } if (type == FILE || type == MMAP) { enum palloc_flags fg = PAL_USER; if (spte->file_read_bytes == 0) fg |= PAL_ZERO; uint8_t *f = frame_alloc (fg, spte); if (!f) return false; if (spte->file_read_bytes > 0) { lock_acquire (&lock_f); if ((int) spte->file_read_bytes != file_read_at (spte->file, f, spte->file_read_bytes, spte->file_offset)) { lock_release (&lock_f); frame_free (f); return false; } lock_release (&lock_f); memset (f + spte->file_read_bytes, 0, spte->file_zero_bytes); } if (!install_page (spte->upage, f, spte->writable)) { frame_free (f); return false; } spte->in_memory = true; } return true; }
bool grow_stack (void *uaddr) { void *upage = pg_round_down (uaddr); if((size_t)(PHYS_BASE - upage) > (1 << 23)) return false; struct spage_table_entry *spte = malloc (sizeof (struct spage_table_entry)); if (!spte) return false; spte->upage = upage; spte->in_memory = true; spte->writable = true; spte->spage_type = SWAP; spte->inevictable = true; uint8_t *f = frame_alloc (PAL_USER, spte); if (!f) { free (spte); return false; } if (!install_page (spte->upage, f, spte->writable)) { free (spte); frame_free (f); return false; } if (intr_context ()) spte->inevictable = false; return hash_insert (&thread_current ()->spage_table, &spte->elem) == NULL; }
/* * reload page back to a frame, given the slot index. * re-establish page mapping from given page user * virtual memory to the frame. */ struct frame_table_entry* swap_reload_pg(uint32_t slot_idx, uint8_t* pg_vaddr){ ASSERT(swap_blk!=NULL); bool already_locked = lock_held_by_current_thread (&swap_lock); if(!already_locked){ lock_acquire (&swap_lock); } // printf("reload> id=%d , slot_idx = %d\n",thread_current()->tid, slot_idx); ASSERT (bitmap_all (swap_table, slot_idx, 1)); struct frame_table_entry* fte = frame_get_new_page(true); fte->pg_vaddr = pg_vaddr; int i; for(i=0;i<8;i++){ block_read (swap_blk, slot_idx*8+i, fte->kpg_vaddr+i*BLOCK_SECTOR_SIZE); } // printf("swap_reload_pg An unassigned value look : %p\n",fte->pg_vaddr); if(!install_page (pg_vaddr, fte->kpg_vaddr, true)) PANIC("Unable to restore page from swap space!"); bitmap_set_multiple (swap_table, slot_idx, 1, false); if(!already_locked){ lock_release (&swap_lock); } fte->pinned = false; return fte; }
bool from_file (struct spage_entry *se) { struct thread *t = thread_current (); struct file *file = se->myfile; off_t ofs = se->ofs; uint8_t upage = se->upage; uint32_t read_bytes = se->read_bytes; uint32_t zero_bytes = se->zero_bytes; bool writable = se->writable; ASSERT ((read_bytes + zero_bytes) % PGSIZE == 0); ASSERT (pg_ofs (upage) == 0); ASSERT (ofs % PGSIZE == 0); // lock_acquire (&filesys_lock); file_seek (file, ofs); // lock_release (&filesys_lock); if ( !(read_bytes > 0 || zero_bytes > 0) ) return false; /* Calculate how to fill this page. We will read PAGE_READ_BYTES bytes from FILE and zero the final PAGE_ZERO_BYTES bytes. */ size_t page_read_bytes = read_bytes < PGSIZE ? read_bytes : PGSIZE; size_t page_zero_bytes = PGSIZE - page_read_bytes; /* Get a page of memory. */ uint8_t *kpage = palloc_get_page (PAL_USER); if (kpage == NULL) return false; /* Load this page. */ // lock_acquire (&filesys_lock); if (file_read (file, kpage, page_read_bytes) != (int) page_read_bytes) { // lock_release (&filesys_lock); palloc_free_page (kpage); return false; } // lock_release (&filesys_lock); memset (kpage + page_read_bytes, 0, page_zero_bytes); /* Add the page to the process's address space. */ if (!install_page (upage, kpage, writable)) { palloc_free_page (kpage); return false; } return true; }
/*
 * Identity-map the physical range between START_ADDR and END_ADDR into
 * PGTABLE, one page at a time, walking upward from the page-aligned
 * start until the cursor crosses END_ADDR.  The two break conditions
 * handle ranges given in either order (NOTE(review): for a descending
 * range the walk relies on address wrap-around — presumably
 * intentional; confirm against callers).
 * BUGFIX: when START_ADDR == END_ADDR neither break condition could
 * ever fire, so the loop mapped essentially the whole address space.
 */
static void setup_identity(pgd_t *pgtable, phys_addr_t start_addr,
			   phys_addr_t end_addr)
{
	phys_addr_t cur;

	start_addr &= PAGE_MASK;

	/* Empty range: nothing to map. */
	if (start_addr == end_addr)
		return;

	for (cur = start_addr; true; cur += PAGE_SIZE) {
		if (start_addr < end_addr && cur >= end_addr)
			break;
		if (start_addr > end_addr && cur <= end_addr)
			break;
		install_page(pgtable, cur, __va(cur));
	}
}
/** * \page_load_demand * \Load page on demand (call only fault occurred) * * \param spte Supplemental page table that fault occurred * \param paddr physical address to be mapped * * \retval true if success * \retval false if failed */ bool page_load_demand (struct page_entry *spte, void *paddr) { /* Read read_bytes from offset written in spte */ if (file_read_at (spte->file, paddr, spte->read_bytes, spte->ofs) != (int) spte->read_bytes) return false; /* Add zero padding */ memset (paddr + spte->read_bytes, 0, spte->zero_bytes); /* Set this page is loaded in memory */ spte->is_loaded = true; /* Install vaddr and paddr mapping into pagedir */ return install_page (spte->vaddr, paddr, spte->writable); }
/* Create a minimal stack by mapping a zeroed page at the top of
   user virtual memory, then point *ESP at PHYS_BASE. */
static bool
setup_stack (void **esp)
{
  uint8_t *kpage = palloc_get_page (PAL_USER | PAL_ZERO);
  if (kpage == NULL)
    return false;

  /* Map the page writable just below PHYS_BASE. */
  if (!install_page (((uint8_t *) PHYS_BASE) - PGSIZE, kpage, true))
    {
      palloc_free_page (kpage);
      return false;
    }

  *esp = PHYS_BASE;
  return true;
}
/* Loads a segment starting at offset OFS in FILE at address UPAGE.
   In total, READ_BYTES + ZERO_BYTES bytes of virtual memory are
   initialized: the first READ_BYTES bytes of each stretch come from
   FILE starting at OFS, and the trailing ZERO_BYTES bytes are zeroed.
   Pages are writable by the user process iff WRITABLE is true.
   Returns true on success, false on allocation, read, or mapping
   failure. */
static bool
load_segment (struct file *file, off_t ofs, uint8_t *upage,
              uint32_t read_bytes, uint32_t zero_bytes, bool writable)
{
  ASSERT ((read_bytes + zero_bytes) % PGSIZE == 0);
  ASSERT (pg_ofs (upage) == 0);
  ASSERT (ofs % PGSIZE == 0);

  file_seek (file, ofs);

  while (read_bytes > 0 || zero_bytes > 0)
    {
      /* This page takes TO_READ bytes from the file; the rest of the
         page is zero-filled. */
      size_t to_read = read_bytes < PGSIZE ? read_bytes : PGSIZE;
      size_t to_zero = PGSIZE - to_read;

      /* Grab a user-pool frame. */
      uint8_t *frame = palloc_get_page (PAL_USER);
      if (frame == NULL)
        return false;

      /* Copy the file data in, then zero the tail. */
      if (file_read (file, frame, to_read) != (int) to_read)
        {
          palloc_free_page (frame);
          return false;
        }
      memset (frame + to_read, 0, to_zero);

      /* Map the frame into the process's address space. */
      if (!install_page (upage, frame, writable))
        {
          palloc_free_page (frame);
          return false;
        }

      /* Advance to the next page of the segment. */
      read_bytes -= to_read;
      zero_bytes -= to_zero;
      upage += PGSIZE;
    }
  return true;
}
/** * \page_install_page * \Install page_entry structure into page table. * \Note that this function immediately install physical memory (not lazy) * * \param upage virtual address of user process * \param kpage kernal virtual address (physical address) to be installed * \param writable indication of read only or not * \param flags palloc flags (to be used for swap in) * \param type type of page (MEM, FILE, ...) * * \retval true on success * \retval fail on fail */ bool page_install_page (void *upage, void *kpage, bool writable, enum palloc_flags flags, enum page_type type) { struct thread *curr = thread_current (); /* Allocate memory, return false if not successfully allocated */ lock_acquire (&curr->page_lock); struct page_entry *pe = malloc (sizeof (struct page_entry)); if (pe == NULL) { lock_release (&curr->page_lock); return false; } /* Write user page address */ pe->vaddr = upage; /* Write type and flags */ pe->type = type; pe->flags = flags; /* Write protection bit */ pe->writable = writable; /* As this function load page not lazily, set is_loaded value to true */ pe->is_loaded = true; /* Insert page entry to sup page table */ hash_insert (&curr->page_table, &pe->elem); /* Add address mapping to original page table. * Install page is implemented in userprog/process.c */ bool result = install_page (upage, kpage, writable); lock_release (&curr->page_lock); /* Return result of install_page */ return result; }
/* Obtains a single free page and returns its kernel virtual address. If PAL_USER is set, the page is obtained from the user pool, otherwise from the kernel pool. If PAL_ZERO is set in FLAGS, then the page is filled with zeros. If no pages are available, returns a null pointer, unless PAL_ASSERT is set in FLAGS, in which case the kernel panics. */ void * palloc_page (enum palloc_flags flags, void * upage, bool writable) { bool success = false; if(!(flags & PAL_USER)) PANIC ("This function cannot be called without PAL_USER flag set\n"); struct pool *pool = &frame_table.frame_pool; size_t page_idx; // empty page, and set to used lock_acquire (&pool->lock); page_idx = bitmap_scan_and_flip (pool->used_map, 0, 1, false); lock_release (&pool->lock); /* sets the members of cur_frame struct after is it allocated */ struct frame_entry * cur_frame; cur_frame = frame_table.frames + page_idx; cur_frame->pid = thread_current()->tid; cur_frame->page_num = pg_no(upage); cur_frame->reference = true; cur_frame->dirty = false; cur_frame->resident = true; success = install_page (upage, cur_frame->kpage, writable); if(DBG)printf("success after in mapping upage %p to kpage %p in palloc page = %d\n", upage, cur_frame->kpage, success); // execute flags if(!success) { if (flags & PAL_ASSERT) PANIC ("palloc_get: out of pages"); // change this? return NULL; } else if (flags & PAL_ZERO) memset (upage, 0, PGSIZE); return cur_frame->kpage; }