/*
 * Locate the physical frame number for the given vaddr using the page table.
 *
 * If the entry is invalid and not on swap, then this is the first reference
 * to the page and a (simulated) physical frame should be allocated and
 * initialized (using init_frame).
 *
 * If the entry is invalid and on swap, then a (simulated) physical frame
 * should be allocated and filled by reading the page data from swap.
 *
 * Counters for hit, miss and reference events should be incremented in
 * this function.
 */
char *find_physpage(addr_t vaddr, char type)
{
    pgtbl_entry_t *p = NULL;           // pointer to the full page table entry for vaddr
    unsigned idx = PGDIR_INDEX(vaddr); // get index into page directory

    // Use top-level page directory to get pointer to 2nd-level page table;
    // initialize the 2nd-level table on first use.
    if (!(pgdir[idx].pde & PG_VALID)) {
        pgdir[idx] = init_second_level();
    }

    // Use vaddr to get index into 2nd-level page table and initialize 'p'.
    uintptr_t ptr_table = PAGE_MASK & pgdir[idx].pde;
    p = (pgtbl_entry_t *)ptr_table + PGTBL_INDEX(vaddr);

    // Every call is a reference event, hit or miss.
    ref_count++;

    // Check whether p is valid, and whether it is on swap, and handle each case.
    if (p->frame & PG_VALID) {
        // Page is present in memory.
        hit_count++;
    } else {
        // Page is not present in memory.
        miss_count++;
        int frame = allocate_frame(p);

        if (p->frame & PG_ONSWAP) {
            // Page is on disk: fill the new frame from swap.
            swap_pagein(frame, p->swap_off);
            p->frame = frame << PAGE_SHIFT;
            p->frame = p->frame | PG_VALID;
        } else {
            // Page is not on disk: this is the first access to the page.
            init_frame(frame, vaddr);
            p->frame = frame << PAGE_SHIFT;
            p->frame = p->frame | PG_DIRTY;
        }
    }

    // Make sure that p is marked valid. Also mark it dirty
    // if the access type indicates that the page will be written to.
    p->frame = p->frame | PG_VALID;
    if (type == 'S' || type == 'M') {
        p->frame = p->frame | PG_DIRTY;
    }

    // Call replacement algorithm's ref_fcn for this page.
    ref_fcn(p);

    // Mark p referenced.
    p->frame = p->frame | PG_REF;

    // Return pointer into (simulated) physical memory at start of frame.
    return &physmem[(p->frame >> PAGE_SHIFT) * SIMPAGESIZE];
}
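/*
 * A standalone sketch (not from the simulator source) of the index
 * arithmetic that PGDIR_INDEX and PGTBL_INDEX perform above. The bit
 * widths here (12-bit page offset, 10-bit second-level index, remaining
 * bits for the directory index) are illustrative assumptions; the real
 * values come from the simulator's pagetable.h. Compile and run it on
 * its own, e.g. cc vaddr_demo.c && ./a.out
 */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12                       /* 4 KB pages (assumed) */
#define PGTBL_BITS 10                       /* entries per 2nd-level table (assumed) */
#define PGTBL_MASK ((1u << PGTBL_BITS) - 1)

#define PGDIR_INDEX(x) ((x) >> (PAGE_SHIFT + PGTBL_BITS))
#define PGTBL_INDEX(x) (((x) >> PAGE_SHIFT) & PGTBL_MASK)
#define PAGE_OFFSET(x) ((x) & ((1u << PAGE_SHIFT) - 1))

int main(void)
{
    uint32_t vaddr = 0x00c03abcu;
    printf("pgdir idx = %u, pgtbl idx = %u, offset = 0x%x\n",
           (unsigned)PGDIR_INDEX(vaddr),
           (unsigned)PGTBL_INDEX(vaddr),
           (unsigned)PAGE_OFFSET(vaddr));
    return 0;
}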
/*
 * lpage_fault - handle a fault on a specific lpage. If the page is
 * not resident, get a physical page from coremap and swap it in.
 *
 * You do not yet need to distinguish a readonly fault from a write
 * fault. When we implement sharing, there will be a difference.
 *
 * Synchronization: Lock the lpage while checking if it's in memory.
 * If it's not, unlock the page while allocating space and loading the
 * page in. This only works because lpages are not currently sharable.
 * The page should be locked again as soon as it is loaded, but be
 * careful of interactions with other locks while modifying the coremap.
 *
 * After it has been loaded, the page must be pinned so that it is not
 * evicted while changes are made to the TLB. It can be unpinned as soon
 * as the TLB is updated.
 */
int lpage_fault(struct lpage *lp, struct addrspace *as, int faulttype, vaddr_t va)
{
    paddr_t pa = lp->lp_paddr & PAGE_FRAME;
    off_t swap = lp->lp_swapaddr;
    int writable = 0;

    /* Lock the page. */
    lpage_lock_and_pin(lp);

    /* If the page is not in RAM, load it into RAM. */
    if (pa == INVALID_PADDR) {
        /* Unlock the page while allocating. */
        lpage_unlock(lp);

        /* Allocate a page; coremap_allocuser also pins it. */
        pa = coremap_allocuser(lp);
        if (pa == INVALID_PADDR) {
            coremap_unpin(lp->lp_paddr & PAGE_FRAME);
            return ENOMEM;
        }

        /* Assert the page is pinned, then take the paging lock. */
        KASSERT(coremap_pageispinned(pa));
        lock_acquire(global_paging_lock);

        /* Fetch the page from disk and put it in RAM. */
        swap_pagein(pa, swap);

        /* Relock the page, then release the paging lock. */
        lpage_lock(lp);
        lock_release(global_paging_lock);

        /* Make sure nobody else paged in the page. */
        KASSERT((lp->lp_paddr & PAGE_FRAME) == INVALID_PADDR);

        /* Set the page's new physical address. */
        lp->lp_paddr = pa;
    }

    /* A write, or a write to a read-only mapping, dirties the page. */
    if (faulttype == VM_FAULT_WRITE || faulttype == VM_FAULT_READONLY) {
        LP_SET(lp, LPF_DIRTY);
        writable = 1;
    }

    /* Put a mapping into the TLB. */
    mmu_map(as, va, pa, writable);
    lpage_unlock(lp);

    return 0;
}
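/*
 * A standalone sketch (invented names; not OS/161 code) of the software
 * dirty-bit tracking the faulttype handling above performs: a clean page
 * is mapped read-only, and when the MMU raises a read-only (write) fault,
 * the handler records the dirty bit and remaps the page writable.
 */
#include <stdio.h>

enum faulttype { FAULT_READ, FAULT_WRITE, FAULT_READONLY };

struct page {
    int dirty;           /* software dirty bit */
    int mapped_writable; /* how the TLB entry is mapped */
};

static void handle_fault(struct page *pg, enum faulttype ft)
{
    switch (ft) {
    case FAULT_READ:
        /* clean page, read access: keep the mapping read-only */
        pg->mapped_writable = 0;
        break;
    case FAULT_WRITE:
    case FAULT_READONLY:
        /* first write: record the dirty bit, upgrade the mapping */
        pg->dirty = 1;
        pg->mapped_writable = 1;
        break;
    }
}

int main(void)
{
    struct page pg = {0, 0};
    handle_fault(&pg, FAULT_READ);
    printf("after read:  dirty=%d writable=%d\n", pg.dirty, pg.mapped_writable);
    handle_fault(&pg, FAULT_READONLY);
    printf("after write: dirty=%d writable=%d\n", pg.dirty, pg.mapped_writable);
    return 0;
}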
/*
 * lpage_fault - handle a fault on a specific lpage. If the page is
 * not resident, get a physical page from coremap and swap it in.
 *
 * You do not yet need to distinguish a readonly fault from a write
 * fault. When we implement sharing, there will be a difference.
 *
 * Synchronization: Lock the lpage while checking if it's in memory.
 * If it's not, unlock the page while allocating space and loading the
 * page in. This only works because lpages are not currently sharable.
 * The page should be locked again as soon as it is loaded, but be
 * careful of interactions with other locks while modifying the coremap.
 *
 * After it has been loaded, the page must be pinned so that it is not
 * evicted while changes are made to the TLB. It can be unpinned as soon
 * as the TLB is updated.
 */
int lpage_fault(struct lpage *lp, struct addrspace *as, int faulttype, vaddr_t va)
{
    paddr_t pa, swa;

    /* Pin the physical page and lock the lpage. */
    lpage_lock_and_pin(lp);

    /* Get the physical address. */
    pa = lp->lp_paddr & PAGE_FRAME;

    /* If the page is not in memory, get it from swap. */
    if (pa == INVALID_PADDR) {
        swa = lp->lp_swapaddr;
        lpage_unlock(lp);

        /* Have a page frame allocated. */
        pa = coremap_allocuser(lp);
        if (pa == INVALID_PADDR) {
            coremap_unpin(lp->lp_paddr & PAGE_FRAME);
            lpage_destroy(lp);
            return ENOMEM;
        }
        KASSERT(coremap_pageispinned(pa));
        lock_acquire(global_paging_lock);

        /* Copy the page contents from swap into physical memory. */
        swap_pagein(pa, swa);
        lpage_lock(lp);
        lock_release(global_paging_lock);

        /* Assert nobody else did the pagein. */
        KASSERT((lp->lp_paddr & PAGE_FRAME) == INVALID_PADDR);
        lp->lp_paddr = pa;
    }

    /* Update the TLB. */
    switch (faulttype) {
    case VM_FAULT_READONLY:
        mmu_map(as, va, pa, 0);
        break;
    case VM_FAULT_READ:
    case VM_FAULT_WRITE:
        /* Mark the page dirty. */
        LP_SET(lp, LPF_DIRTY);
        mmu_map(as, va, pa, 1);
        break;
    }

    /* The page was already unpinned in mmu_map. */
    lpage_unlock(lp);
    return 0;
}
/*
 * Locate the physical frame number for the given vaddr using the page table.
 *
 * If the entry is invalid and not on swap, then this is the first reference
 * to the page and a (simulated) physical frame should be allocated and
 * initialized (using init_frame).
 *
 * If the entry is invalid and on swap, then a (simulated) physical frame
 * should be allocated and filled by reading the page data from swap.
 *
 * Counters for hit, miss and reference events should be incremented in
 * this function.
 */
char *find_physpage(addr_t vaddr, char type)
{
    pgtbl_entry_t *p = NULL;           // pointer to the full page table entry for vaddr
    unsigned idx = PGDIR_INDEX(vaddr); // get index into page directory

    // Use top-level page directory to get pointer to 2nd-level page table.
    // Check whether the directory entry is valid.
    unsigned int valid_pde = pgdir[idx].pde & PG_VALID;

    // Not valid: initialize a 2nd-level page table.
    if (!valid_pde) {
        pgdir[idx] = init_second_level();
    }

    // Use vaddr to get index into 2nd-level page table and initialize 'p'.
    unsigned pgtbl_index = PGTBL_INDEX(vaddr);
    p = (pgtbl_entry_t *)(pgdir[idx].pde & PAGE_MASK);
    p = p + pgtbl_index;

    // Check if p is valid or not, on swap or not, and handle appropriately.
    // Find the valid bit and the on-swap bit.
    unsigned int valid = p->frame & PG_VALID;
    unsigned int on_swap = p->frame & PG_ONSWAP;

    if (!valid) {
        // Allocate a frame for p.
        int frame = allocate_frame(p);

        // Record the page table indices for OPT.
        coremap[frame].pgtbl_idx = (int)pgtbl_index;
        coremap[frame].pgdir_idx = (int)idx;

        if (on_swap) {
            // Read the page in from swap.
            int success = swap_pagein(frame, p->swap_off);
            assert(success == 0);
        } else {
            // First reference: initialize the frame and mark it dirty.
            init_frame(frame, vaddr);
            p->frame = p->frame | PG_DIRTY;
        }

        // Not valid, so count a miss.
        miss_count++;
    }

    // Make sure that p is marked valid and referenced. Also mark it
    // dirty if the access type indicates that the page will be written to.
    p->frame = p->frame | PG_VALID;
    p->frame = p->frame | PG_REF;
    assert(p->frame & PG_VALID);

    // Stores and modifies set the dirty bit.
    if ((type == 'S') || (type == 'M')) {
        p->frame = p->frame | PG_DIRTY;
    }

    // Count a hit if the entry was valid on entry; count every reference.
    if (valid) {
        hit_count++;
    }
    ref_count++;

    // Call replacement algorithm's ref_fcn for this page.
    ref_fcn(p);

    // Return pointer into (simulated) physical memory at start of frame.
    return &physmem[(p->frame >> PAGE_SHIFT) * SIMPAGESIZE];
}
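/*
 * A standalone sketch (illustrative; not the simulator's header) of the
 * flag packing both find_physpage versions rely on: the frame number
 * lives above PAGE_SHIFT, and the status bits live in the low bits, so
 * the two never collide. The bit positions here are assumptions.
 */
#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PG_VALID   0x1u
#define PG_DIRTY   0x2u
#define PG_REF     0x4u
#define PG_ONSWAP  0x8u

int main(void)
{
    unsigned frame_entry;

    /* Install frame 5 and mark it valid and referenced. */
    frame_entry = 5u << PAGE_SHIFT;
    frame_entry |= PG_VALID | PG_REF;

    /* A store ('S') or modify ('M') would also set the dirty bit. */
    frame_entry |= PG_DIRTY;

    assert((frame_entry >> PAGE_SHIFT) == 5); /* frame number intact */
    assert(frame_entry & PG_VALID);
    assert(!(frame_entry & PG_ONSWAP));

    printf("frame=%u flags=0x%x\n",
           frame_entry >> PAGE_SHIFT, frame_entry & 0xfffu);
    return 0;
}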
/* Copy the logical page fromlp into a newly created lpage returned via tolp. */
int lp_copy(struct lpage *fromlp, struct lpage **tolp)
{
    struct lpage *newlp = NULL;
    paddr_t frompa;
    paddr_t topa;
    off_t swapaddr;
    int result;

    DEBUG(DB_VM, "LPage: lp_copy\n");

    lock_acquire(fromlp->lock);
    frompa = fromlp->paddr & PAGE_FRAME;

    /* If the source page is not resident, page it in from swap. */
    if (frompa == INVALID_PADDR) {
        swapaddr = fromlp->swapaddr;
        lock_release(fromlp->lock);

        frompa = cm_allocuserpage(fromlp);
        if (frompa == INVALID_PADDR) {
            return (ENOMEM);
        }
        KASSERT(cm_pageispinned(frompa));

        lock_acquire(paging_lock);
        swap_pagein(frompa, swapaddr);
        lock_acquire(fromlp->lock);
        lock_release(paging_lock);

        /* Assert nobody else did the pagein. */
        KASSERT((fromlp->paddr & PAGE_FRAME) == INVALID_PADDR);
        fromlp->paddr = frompa | LPF_LOCKED;
    } else {
        cm_pin(frompa);
    }

    KASSERT(cm_pageispinned(frompa));

    /* Create the destination lpage; lp_setup returns it locked and pinned. */
    result = lp_setup(&newlp, &topa);
    if (result) {
        cm_unpin(frompa);
        lock_release(fromlp->lock);
        return (result);
    }

    KASSERT(cm_pageispinned(topa));
    KASSERT(cm_pageispinned(frompa));
    cm_copypage(frompa, topa);

    cm_unpin(topa);
    cm_unpin(frompa);
    lock_release(fromlp->lock);
    lock_release(newlp->lock);

    *tolp = newlp;
    return (0);
}
/*
 * lpage_fault - handle a fault on a specific lpage. If the page is
 * not resident, get a physical page from coremap and swap it in.
 *
 * You do not yet need to distinguish a readonly fault from a write
 * fault. When we implement sharing, there will be a difference.
 *
 * Synchronization: Lock the lpage while checking if it's in memory.
 * If it's not, unlock the page while allocating space and loading the
 * page in. This only works because lpages are not currently sharable.
 * The page should be locked again as soon as it is loaded, but be
 * careful of interactions with other locks while modifying the coremap.
 *
 * After it has been loaded, the page must be pinned so that it is not
 * evicted while changes are made to the TLB. It can be unpinned as soon
 * as the TLB is updated.
 */
int lpage_fault(struct lpage *lp, struct addrspace *as, int faulttype, vaddr_t va)
{
    KASSERT(lp != NULL); /* kernel pages never get paged out, thus never fault */

    lock_acquire(global_paging_lock);
    if ((lp->lp_paddr & PAGE_FRAME) != INVALID_PADDR) {
        lpage_lock_and_pin(lp);
    } else {
        lpage_lock(lp);
    }
    lock_release(global_paging_lock);

    KASSERT(lp->lp_swapaddr != INVALID_SWAPADDR);

    paddr_t pa = lp->lp_paddr;
    int writable; /* 0 if page is read-only, 1 if page is writable */

    /* case 1 - minor fault: the frame is still in memory */
    if ((pa & PAGE_FRAME) != INVALID_PADDR) {
        /* make sure it's a minor fault */
        KASSERT(pa != INVALID_PADDR);

        /* set the TLB entry's dirty bit for anything but a plain read */
        writable = (faulttype != VM_FAULT_READ);

        /* update stats */
        spinlock_acquire(&stats_spinlock);
        ct_minfaults++;
        DEBUG(DB_VM, "\nlpage_fault: minor faults = %d.", ct_minfaults);
        spinlock_release(&stats_spinlock);
    } else {
        /* case 2 - major fault: the frame was swapped out to disk */

        /* make sure it is a major fault */
        KASSERT(pa == INVALID_PADDR);

        /* allocate a new frame */
        lpage_unlock(lp); /* must not hold lpage locks before entering coremap */
        pa = coremap_allocuser(lp); /* evict if needed; also pins the frame */
        if ((pa & PAGE_FRAME) == INVALID_PADDR) {
            DEBUG(DB_VM, "lpage_fault: ENOMEM: va=0x%x\n", va);
            return ENOMEM;
        }
        KASSERT(coremap_pageispinned(pa));

        /* retrieve the contents from disk */
        lock_acquire(global_paging_lock); /* because swap_pagein needs it */
        swap_pagein((pa & PAGE_FRAME), lp->lp_swapaddr); /* frame already pinned above */
        lpage_lock(lp);
        lock_release(global_paging_lock);

        /* assert that nobody else did the pagein */
        KASSERT((lp->lp_paddr & PAGE_FRAME) == INVALID_PADDR);

        /* now update the PTE with the new PFN */
        lp->lp_paddr = pa; /* page is clean */

        /* leave the TLB entry read-only so we can detect the first write */
        writable = 0;

        /* update stats */
        spinlock_acquire(&stats_spinlock);
        ct_majfaults++;
        DEBUG(DB_VM, "\nlpage_fault: MAJOR faults = %d", ct_majfaults);
        spinlock_release(&stats_spinlock);
    }

    /* check preconditions before updating the TLB/PTE */
    KASSERT(coremap_pageispinned(lp->lp_paddr));
    KASSERT(spinlock_do_i_hold(&lp->lp_spinlock));

    /* the PTE is dirty if the instruction is a write */
    if (writable) {
        LP_SET(lp, LPF_DIRTY);
    }

    /* put the new entry into the TLB; the page is pinned in both cases above */
    KASSERT(coremap_pageispinned(lp->lp_paddr));
    mmu_map(as, va, lp->lp_paddr, writable); /* update TLB and unpin coremap */
    lpage_unlock(lp);

    return 0;
}
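/*
 * A standalone pthreads sketch (illustrative; OS/161 uses its own lock
 * primitives) of the "unlock around slow I/O, relock, re-check" pattern
 * that all three lpage_fault variants above follow. The names page_t and
 * page_in_from_disk are invented for the sketch. The pattern is only safe
 * because exactly one thread can fault on a given page, which is what the
 * re-check assertion documents.
 */
#include <assert.h>
#include <pthread.h>
#include <stdio.h>

typedef struct {
    pthread_mutex_t lock;
    int resident; /* 1 if the page has a physical frame */
} page_t;

static void page_in_from_disk(page_t *pg)
{
    /* stand-in for coremap_allocuser + swap_pagein */
    (void)pg;
}

static void fault(page_t *pg)
{
    pthread_mutex_lock(&pg->lock);
    if (!pg->resident) {
        /* Drop the lock: paging in is slow and takes other locks. */
        pthread_mutex_unlock(&pg->lock);
        page_in_from_disk(pg);
        pthread_mutex_lock(&pg->lock);
        /* Nobody else may have paged this page in meanwhile. */
        assert(!pg->resident);
        pg->resident = 1;
    }
    /* ...update the TLB while the frame is still pinned... */
    pthread_mutex_unlock(&pg->lock);
}

int main(void)
{
    page_t pg = { PTHREAD_MUTEX_INITIALIZER, 0 };
    fault(&pg);
    printf("resident=%d\n", pg.resident);
    return 0;
}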
/*
 * lpage_copy: create a new lpage and copy data from another lpage.
 *
 * The synchronization for this is kind of unpleasant. We do it like
 * this:
 *
 * 1. Create newlp.
 * 2. Materialize a page for newlp, so it's locked and pinned.
 * 3. Lock and pin oldlp.
 * 4. Extract the physical address and swap address.
 * 5. If oldlp wasn't present,
 *    5a. Unlock oldlp.
 *    5b. Page in.
 *    5c. This pins the page in the coremap.
 *    5d. Leave the page pinned and relock oldlp.
 *    5e. Assert nobody else paged the page in.
 * 6. Copy.
 * 7. Unlock the lpages first, so we can enter the coremap.
 * 8. Unpin the physical pages.
 */
int lpage_copy(struct lpage *oldlp, struct lpage **lpret)
{
    struct lpage *newlp;
    paddr_t newpa, oldpa;
    off_t swa;
    int result;

    result = lpage_materialize(&newlp, &newpa);
    if (result) {
        return result;
    }
    KASSERT(coremap_pageispinned(newpa));

    /* Pin the physical page and lock the lpage. */
    lpage_lock_and_pin(oldlp);
    oldpa = oldlp->lp_paddr & PAGE_FRAME;

    /*
     * If there is no physical page, we allocate one, which pins
     * it, and then (re)lock the lpage. Since we are single-
     * threaded (if we weren't, we'd hold the address space lock
     * to exclude sibling threads) nobody else should have paged
     * the page in behind our back.
     */
    if (oldpa == INVALID_PADDR) {
        /*
         * XXX this is mostly copied from lpage_fault
         */
        swa = oldlp->lp_swapaddr;
        lpage_unlock(oldlp);
        oldpa = coremap_allocuser(oldlp);
        if (oldpa == INVALID_PADDR) {
            coremap_unpin(newlp->lp_paddr & PAGE_FRAME);
            lpage_destroy(newlp);
            return ENOMEM;
        }
        KASSERT(coremap_pageispinned(oldpa));
        lock_acquire(global_paging_lock);
        swap_pagein(oldpa, swa);
        lpage_lock(oldlp);
        lock_release(global_paging_lock);

        /* Assert nobody else did the pagein. */
        KASSERT((oldlp->lp_paddr & PAGE_FRAME) == INVALID_PADDR);
        oldlp->lp_paddr = oldpa;
    }

    KASSERT(coremap_pageispinned(oldpa));
    coremap_copy_page(oldpa, newpa);
    KASSERT(LP_ISDIRTY(newlp));

    lpage_unlock(oldlp);
    lpage_unlock(newlp);
    coremap_unpin(newpa);
    coremap_unpin(oldpa);

    *lpret = newlp;
    return 0;
}
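/*
 * A standalone sketch (illustrative; not the kernel's coremap_copy_page)
 * of what step 6 above amounts to once both frames are resident and
 * pinned: a bounded memcpy between two frames of a physical-memory
 * array. The frame size and physmem layout are assumptions.
 */
#include <assert.h>
#include <stdio.h>
#include <string.h>

#define FRAME_SIZE 4096
#define NFRAMES    8

static unsigned char physmem[NFRAMES * FRAME_SIZE];

static void copy_frame(unsigned from, unsigned to)
{
    assert(from < NFRAMES && to < NFRAMES && from != to);
    memcpy(&physmem[to * FRAME_SIZE], &physmem[from * FRAME_SIZE], FRAME_SIZE);
}

int main(void)
{
    memset(&physmem[2 * FRAME_SIZE], 0xab, FRAME_SIZE); /* fill frame 2 */
    copy_frame(2, 5);
    assert(physmem[5 * FRAME_SIZE] == 0xab);
    printf("frame 2 copied to frame 5\n");
    return 0;
}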