/*
 * Allocates a frame to be used for the virtual page represented by p.
 * If all frames are in use, calls the replacement algorithm's evict_fcn to
 * select a victim frame.  Writes victim to swap if needed, and updates
 * pagetable entry for victim to indicate that virtual page is no longer in
 * (simulated) physical memory.
 *
 * Counters for evictions should be updated appropriately in this function.
 *
 * Returns the index of the (now free) frame assigned to p.
 */
int allocate_frame(pgtbl_entry_t *p) {
	int i;
	int frame = -1;

	/* First look for a free frame in the coremap. */
	for (i = 0; i < memsize; i++) {
		if (!coremap[i].in_use) {
			frame = i;
			break;
		}
	}

	if (frame == -1) { /* Didn't find a free page. */
		/* Call replacement algorithm's evict function to select victim */
		frame = evict_fcn();

		/* All frames were in use, so victim frame must hold some page. */
		pgtbl_entry_t *victim = coremap[frame].pte;

		/* Write the victim to swap ONLY if it is dirty; a clean page
		 * already has an up-to-date copy on swap, so re-writing it
		 * (as the previous version did unconditionally) is wasted I/O.
		 * NOTE(review): this assumes pages are marked PG_DIRTY when
		 * first brought into memory, so a clean page always has a
		 * valid swap copy — confirm against the fault handler. */
		if (victim->frame & PG_DIRTY) {
			int swap_slot = swap_pageout(frame, (int) victim->swap_off);
			assert(swap_slot != INVALID_SWAP);

			victim->swap_off = (off_t) swap_slot;
			/* Page is now clean and resides on swap. */
			victim->frame &= ~PG_DIRTY;
			victim->frame |= PG_ONSWAP;

			evict_dirty_count++;
		} else {
			evict_clean_count++;
		}

		/* Victim's virtual page is no longer in physical memory. */
		victim->frame &= ~PG_VALID;
	}

	/* Update the page table entry for the incoming page. */
	p->frame = frame << PAGE_SHIFT;

	/* Record information for virtual page that will now be stored in frame */
	coremap[frame].in_use = 1;
	coremap[frame].pte = p;

	return frame;
}
/*
 * Allocates a frame to be used for the virtual page represented by p.
 * If all frames are in use, calls the replacement algorithm's evict_fcn to
 * select a victim frame.  Writes victim to swap if needed, and updates
 * pagetable entry for victim to indicate that virtual page is no longer in
 * (simulated) physical memory.
 *
 * Counters for evictions should be updated appropriately in this function.
 *
 * Returns the index of the frame now assigned to p.
 */
int allocate_frame(pgtbl_entry_t *p) {
	int idx;
	int frame = -1;

	/* Scan the coremap for an unused frame. */
	for (idx = 0; idx < memsize; idx++) {
		if (coremap[idx].in_use) {
			continue;
		}
		frame = idx;
		break;
	}

	if (frame < 0) {
		/* No free frame: ask the replacement policy for a victim. */
		frame = evict_fcn();

		/* The victim frame necessarily holds some resident page. */
		pgtbl_entry_t *victim = coremap[frame].pte;

		if (victim->frame & PG_DIRTY) {
			/* Dirty victim: write it back to swap, record its
			 * (possibly new) swap slot, and mark it clean and
			 * on-swap. */
			victim->swap_off = swap_pageout(frame, victim->swap_off);
			victim->frame &= ~PG_DIRTY;
			victim->frame |= PG_ONSWAP;
			evict_dirty_count++;
		} else {
			/* Clean victim: no write-back required. */
			evict_clean_count++;
		}

		/* The victim's virtual page is no longer resident. */
		victim->frame &= ~PG_VALID;
	}

	/* Hand the frame over to the incoming virtual page. */
	coremap[frame].in_use = 1;
	coremap[frame].pte = p;
	coremap[frame].length = p->length;

	return frame;
}
/*
 * lpage_evict: Evict an lpage from physical memory.
 *
 * Synchronization: lock the lpage while evicting it. We come here
 * from the coremap and should
 * have pinned the physical page. This is why we must not hold lpage
 * locks while entering the coremap code.
 *
 * Dirty pages are written to swap before being marked non-resident;
 * clean pages are simply discarded. Eviction statistics are updated
 * under stats_spinlock in both cases.
 */
void lpage_evict(struct lpage *lp)
{
	KASSERT(lp != NULL);
	lpage_lock(lp);

	/* The caller must hand us a page that is resident and has a
	 * swap slot reserved. */
	KASSERT(lp->lp_paddr != INVALID_PADDR);
	KASSERT(lp->lp_swapaddr != INVALID_SWAPADDR);

	/* if the page is dirty, swap_pageout */
	if (LP_ISDIRTY(lp)) {
		lpage_unlock(lp); // release lock before doing I/O
		/* While unlocked, safety rests on the global paging lock
		 * and the pin taken by the coremap caller — assert both. */
		KASSERT(lock_do_i_hold(global_paging_lock));
		KASSERT(coremap_pageispinned(lp->lp_paddr));
		swap_pageout((lp->lp_paddr & PAGE_FRAME), lp->lp_swapaddr);
		lpage_lock(lp);
		/* The page must still be resident after the I/O completes. */
		KASSERT((lp->lp_paddr & PAGE_FRAME) != INVALID_PADDR);

		/* update stats */
		spinlock_acquire(&stats_spinlock);
		ct_write_evictions++;
		DEBUG (DB_VM, "lpage_evict: evicting Dirty page 0x%x\n", (lp->lp_paddr & PAGE_FRAME));
		spinlock_release(&stats_spinlock);
	} else {
		/* if page is clean, just update stats */
		spinlock_acquire(&stats_spinlock);
		ct_discard_evictions++;
		DEBUG (DB_VM, "lpage_evict: evicting Clean page 0x%x\n", (lp->lp_paddr & PAGE_FRAME));
		spinlock_release(&stats_spinlock);
	}

	/* modify PTE to indicate that the page is no longer in memory. */
	lp->lp_paddr = INVALID_PADDR;
	lpage_unlock(lp);
}
/* * lpage_evict: Evict an lpage from physical memory. * * Synchronization: lock the lpage while accessing it. We come here * from the coremap and should have the global paging lock and should * have pinned the physical page (see coremap.c:do_evict()). * This is why we must not hold lpage locks while entering the coremap code. * * Similar to lpage_fault, the lpage lock should not be held while performing * the page out (if one is needed). */ void lpage_evict(struct lpage *lp) { paddr_t pa; off_t swapaddr; KASSERT(lock_do_i_hold(global_paging_lock)); KASSERT(lp != NULL); lpage_lock(lp); swapaddr = lp->lp_swapaddr; pa = lp->lp_paddr & PAGE_FRAME; KASSERT(pa != INVALID_PADDR); if (LP_ISDIRTY(lp)) { lpage_unlock(lp); LP_CLEAR(lp, LPF_DIRTY); swap_pageout(pa, swapaddr); lpage_lock(lp); } lp->lp_paddr = INVALID_PADDR; lpage_unlock(lp); }
/* * lpage_evict: Evict an lpage from physical memory. * * Synchronization: lock the lpage while accessing it. We come here * from the coremap and should have the global paging lock and should * have pinned the physical page (see coremap.c:do_evict()). * This is why we must not hold lpage locks while entering the coremap code. * * Similar to lpage_fault, the lpage lock should not be held while performing * the page out (if one is needed). */ void lpage_evict(struct lpage *lp) { paddr_t physical_address; off_t swap_address; // Lock the lpage while accessing it. KASSERT(lp != NULL); lpage_lock(lp); // Obtain the physical & swap address' physical_address = lp->lp_paddr & PAGE_FRAME; swap_address = lp->lp_swapaddr; // If the page is stored in RAM memory... if (physical_address != INVALID_PADDR) { DEBUG(DB_VM, "lpage_evict: Moving page from paddr 0x%x to swapaddr 0x%llx\n", physical_address, swap_address); // If page is dirty.. if (LP_ISDIRTY(lp)) { // Move page into swapspace. lpage_unlock(lp); swap_pageout(physical_address, swap_address); LP_CLEAR(lp, LPF_DIRTY); lpage_lock(lp); } // Remove page from physical memory. lp->lp_paddr = INVALID_PADDR; lpage_unlock(lp); } else { lpage_unlock(lp); } }