/*
 * lpage_zerofill: create a new lpage and arrange for it to be cleared
 * to all zeros. The current implementation causes the lpage to be
 * resident upon return, but this is not a guaranteed property, and
 * nothing prevents the page from being evicted before it is used by
 * the caller.
 *
 * Synchronization: coremap_allocuser returns the new physical page
 * "pinned" (locked) - we hold that lock while we update the page
 * contents and the necessary lpage fields. Unlock the lpage before
 * unpinning, so it's safe to take the coremap spinlock.
 */
int
lpage_zerofill(struct lpage **lpret)
{
	struct lpage *lp;
	paddr_t pa;
	int result;

	result = lpage_materialize(&lp, &pa);
	if (result) {
		return result;
	}
	KASSERT(spinlock_do_i_hold(&lp->lp_spinlock));
	KASSERT(coremap_pageispinned(pa));

	/* Don't actually need the lpage locked. */
	lpage_unlock(lp);

	coremap_zero_page(pa);

	KASSERT(coremap_pageispinned(pa));
	coremap_unpin(pa);

	spinlock_acquire(&stats_spinlock);
	ct_zerofills++;
	spinlock_release(&stats_spinlock);

	*lpret = lp;
	return 0;
}
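/*
 * Usage sketch (hypothetical, not from this file): allocating a run of
 * zero-filled pages for a new region, unwinding on failure. The page
 * array argument is an assumption about the caller's page-table layout.
 */
static int
alloc_zeroed_pages(struct lpage **pages, unsigned npages)
{
	unsigned i;
	int result;

	for (i = 0; i < npages; i++) {
		result = lpage_zerofill(&pages[i]);
		if (result) {
			/* Unwind: destroy the pages created so far. */
			while (i > 0) {
				i--;
				lpage_destroy(pages[i]);
				pages[i] = NULL;
			}
			return result;
		}
	}
	return 0;
}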
void
coremap_copy_page(paddr_t oldpaddr, paddr_t newpaddr)
{
	vaddr_t oldva, newva;

	KASSERT(oldpaddr != newpaddr);
	KASSERT(coremap_pageispinned(oldpaddr));
	KASSERT(coremap_pageispinned(newpaddr));

	oldva = PADDR_TO_KVADDR(oldpaddr);
	newva = PADDR_TO_KVADDR(newpaddr);
	memcpy((char *)newva, (char *)oldva, PAGE_SIZE);
}
/*
 * lpage_fault - handle a fault on a specific lpage. If the page is
 * not resident, get a physical page from coremap and swap it in.
 *
 * You do not yet need to distinguish a readonly fault from a write
 * fault. When we implement sharing, there will be a difference.
 *
 * Synchronization: Lock the lpage while checking if it's in memory.
 * If it's not, unlock the page while allocating space and loading the
 * page in. This only works because lpages are not currently sharable.
 * The page should be locked again as soon as it is loaded, but be
 * careful of interactions with other locks while modifying the coremap.
 *
 * After it has been loaded, the page must be pinned so that it is not
 * evicted while changes are made to the TLB. It can be unpinned as soon
 * as the TLB is updated.
 */
int
lpage_fault(struct lpage *lp, struct addrspace *as, int faulttype, vaddr_t va)
{
	paddr_t pa;
	off_t swap;
	int writable = 0;

	/* Lock the page, then read its fields under the lock. */
	lpage_lock_and_pin(lp);
	pa = lp->lp_paddr & PAGE_FRAME;
	swap = lp->lp_swapaddr;

	/* If the page is not in RAM, load it into RAM. */
	if (pa == INVALID_PADDR) {
		/* Unlock the page while allocating. */
		lpage_unlock(lp);

		/* Allocate a page and pin it. */
		pa = coremap_allocuser(lp);
		if (pa == INVALID_PADDR) {
			coremap_unpin(lp->lp_paddr & PAGE_FRAME);
			return ENOMEM;
		}

		/* Assert the page is pinned, and take the paging lock. */
		KASSERT(coremap_pageispinned(pa));
		lock_acquire(global_paging_lock);

		/* Fetch from disk and put in RAM. */
		swap_pagein(pa, swap);

		/* Relock the page; release the paging lock. */
		lpage_lock(lp);
		lock_release(global_paging_lock);

		/* Make sure nobody else paged in the page. */
		KASSERT((lp->lp_paddr & PAGE_FRAME) == INVALID_PADDR);

		/* Set the page's new physical address. */
		lp->lp_paddr = pa;
	}

	if (faulttype == VM_FAULT_WRITE || faulttype == VM_FAULT_READONLY) {
		LP_SET(lp, LPF_DIRTY);
		writable = 1;
	}

	/* Put a mapping into the TLB. */
	/*
	if (coremap_pageispinned(lp->lp_paddr) == 0) {
		DEBUG(DB_VM, "Page is unpinned!");
	}
	*/
	mmu_map(as, va, pa, writable);
	lpage_unlock(lp);

	return 0;
}
void
coremap_zero_page(paddr_t paddr)
{
	vaddr_t va;

	KASSERT(coremap_pageispinned(paddr));

	va = PADDR_TO_KVADDR(paddr);
	bzero((char *)va, PAGE_SIZE);
}
/*
 * lpage_fault - handle a fault on a specific lpage. If the page is
 * not resident, get a physical page from coremap and swap it in.
 *
 * You do not yet need to distinguish a readonly fault from a write
 * fault. When we implement sharing, there will be a difference.
 *
 * Synchronization: Lock the lpage while checking if it's in memory.
 * If it's not, unlock the page while allocating space and loading the
 * page in. This only works because lpages are not currently sharable.
 * The page should be locked again as soon as it is loaded, but be
 * careful of interactions with other locks while modifying the coremap.
 *
 * After it has been loaded, the page must be pinned so that it is not
 * evicted while changes are made to the TLB. It can be unpinned as soon
 * as the TLB is updated.
 */
int
lpage_fault(struct lpage *lp, struct addrspace *as, int faulttype, vaddr_t va)
{
	paddr_t pa;
	off_t swa;

	/* Pin the physical page and lock the lpage. */
	lpage_lock_and_pin(lp);

	/* Get the physical address. */
	pa = lp->lp_paddr & PAGE_FRAME;

	/* If the page is not in memory, get it from swap. */
	if (pa == INVALID_PADDR) {
		swa = lp->lp_swapaddr;
		lpage_unlock(lp);

		/* Allocate a page frame. */
		pa = coremap_allocuser(lp);
		if (pa == INVALID_PADDR) {
			coremap_unpin(lp->lp_paddr & PAGE_FRAME);
			lpage_destroy(lp);
			return ENOMEM;
		}
		KASSERT(coremap_pageispinned(pa));
		lock_acquire(global_paging_lock);

		/* Copy the page contents from swap into physical memory. */
		swap_pagein(pa, swa);

		lpage_lock(lp);
		lock_release(global_paging_lock);

		/* Assert nobody else did the pagein. */
		KASSERT((lp->lp_paddr & PAGE_FRAME) == INVALID_PADDR);
		lp->lp_paddr = pa;
	}

	/* Update the TLB. */
	switch (faulttype) {
	    case VM_FAULT_READONLY:
		mmu_map(as, va, pa, 0);
		break;
	    case VM_FAULT_READ:
	    case VM_FAULT_WRITE:
		/* Mark the page dirty. */
		LP_SET(lp, LPF_DIRTY);
		mmu_map(as, va, pa, 1);
		break;
	}

	/* The page was already unpinned by mmu_map. */
	lpage_unlock(lp);
	return 0;
}
/*
 * lpage_evict: Evict an lpage from physical memory.
 *
 * Synchronization: lock the lpage while evicting it. We come here
 * from the coremap and should have pinned the physical page. This is
 * why we must not hold lpage locks while entering the coremap code.
 */
void
lpage_evict(struct lpage *lp)
{
	KASSERT(lp != NULL);
	lpage_lock(lp);

	KASSERT(lp->lp_paddr != INVALID_PADDR);
	KASSERT(lp->lp_swapaddr != INVALID_SWAPADDR);

	/* If the page is dirty, swap it out. */
	if (LP_ISDIRTY(lp)) {
		lpage_unlock(lp);	/* release lock before doing I/O */
		KASSERT(lock_do_i_hold(global_paging_lock));
		KASSERT(coremap_pageispinned(lp->lp_paddr));

		swap_pageout(lp->lp_paddr & PAGE_FRAME, lp->lp_swapaddr);
		lpage_lock(lp);
		KASSERT((lp->lp_paddr & PAGE_FRAME) != INVALID_PADDR);

		/* update stats */
		spinlock_acquire(&stats_spinlock);
		ct_write_evictions++;
		DEBUG(DB_VM, "lpage_evict: evicting dirty page 0x%x\n",
		      lp->lp_paddr & PAGE_FRAME);
		spinlock_release(&stats_spinlock);
	}
	else {
		/* If the page is clean, just update stats. */
		spinlock_acquire(&stats_spinlock);
		ct_discard_evictions++;
		DEBUG(DB_VM, "lpage_evict: evicting clean page 0x%x\n",
		      lp->lp_paddr & PAGE_FRAME);
		spinlock_release(&stats_spinlock);
	}

	/* Modify the PTE to indicate that the page is no longer in memory. */
	lp->lp_paddr = INVALID_PADDR;

	lpage_unlock(lp);
}
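/*
 * Caller-side sketch (assumptions, not from this file): the asserts in
 * lpage_evict imply that the coremap's replacement path holds the
 * global paging lock and has pinned the victim frame before calling
 * in, and that no lpage lock is held at the call. coremap_pin() is an
 * assumed entry point; the real coremap may mark the pin internally
 * while choosing the victim.
 */
static void
evict_page_sketch(struct lpage *victim, paddr_t victimpa)
{
	lock_acquire(global_paging_lock);
	coremap_pin(victimpa);		/* assumed pin call */
	lpage_evict(victim);		/* does pageout I/O if dirty */
	coremap_unpin(victimpa);
	lock_release(global_paging_lock);
}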
/*
 * swap_io: Does one swap I/O. Panics on failure.
 *
 * Synchronization: none specifically. The physical page should be
 * marked "pinned" (locked) so it won't be touched by other people.
 */
static
void
swap_io(paddr_t pa, off_t swapaddr, enum uio_rw rw)
{
	struct iovec iov;
	struct uio u;
	vaddr_t va;
	int result;

	KASSERT(lock_do_i_hold(global_paging_lock));
	KASSERT(pa != INVALID_PADDR);
	KASSERT(swapaddr % PAGE_SIZE == 0);
	KASSERT(coremap_pageispinned(pa));
	KASSERT(bitmap_isset(swapmap, swapaddr / PAGE_SIZE));

	va = coremap_map_swap_page(pa);

	uio_kinit(&iov, &u, (char *)va, PAGE_SIZE, swapaddr, rw);
	if (rw == UIO_READ) {
		result = VOP_READ(swapstore, &u);
	}
	else {
		result = VOP_WRITE(swapstore, &u);
	}

	coremap_unmap_swap_page(va, pa);

	if (result == EIO) {
		panic("swap: EIO on swapfile (offset %ld)\n", (long)swapaddr);
	}
	else if (result == EINVAL) {
		panic("swap: EINVAL from swapfile (offset %ld)\n",
		      (long)swapaddr);
	}
	else if (result) {
		panic("swap: Error %d from swapfile (offset %ld)\n",
		      result, (long)swapaddr);
	}
}
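/*
 * swap_pagein and swap_pageout are used throughout this section but
 * not shown. A minimal sketch, assuming they are thin wrappers that
 * select the transfer direction for swap_io (and so inherit its
 * requirement that global_paging_lock is held and the page is pinned):
 */
void
swap_pagein(paddr_t pa, off_t swapaddr)
{
	swap_io(pa, swapaddr, UIO_READ);	/* disk -> physical page */
}

void
swap_pageout(paddr_t pa, off_t swapaddr)
{
	swap_io(pa, swapaddr, UIO_WRITE);	/* physical page -> disk */
}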
/*
 * lpage_materialize: create a new lpage, allocating both swap space
 * and a physical page for it. On success, the lpage is returned
 * locked, and the physical page is returned pinned and marked dirty.
 */
static
int
lpage_materialize(struct lpage **lpret, paddr_t *paret)
{
	struct lpage *lp;
	paddr_t pa;
	off_t swa;

	lp = lpage_create();
	if (lp == NULL) {
		return ENOMEM;
	}

	swa = swap_alloc();
	if (swa == INVALID_SWAPADDR) {
		lpage_destroy(lp);
		return ENOSPC;
	}
	lp->lp_swapaddr = swa;

	pa = coremap_allocuser(lp);
	if (pa == INVALID_PADDR) {
		/* lpage_destroy will clean up the swap */
		lpage_destroy(lp);
		return ENOSPC;
	}

	lpage_lock(lp);

	lp->lp_paddr = pa | LPF_DIRTY;

	KASSERT(coremap_pageispinned(pa));

	*lpret = lp;
	*paret = pa;
	return 0;
}
/*
 * lpage_fault - handle a fault on a specific lpage. If the page is
 * not resident, get a physical page from coremap and swap it in.
 *
 * You do not yet need to distinguish a readonly fault from a write
 * fault. When we implement sharing, there will be a difference.
 *
 * Synchronization: Lock the lpage while checking if it's in memory.
 * If it's not, unlock the page while allocating space and loading the
 * page in. This only works because lpages are not currently sharable.
 * The page should be locked again as soon as it is loaded, but be
 * careful of interactions with other locks while modifying the coremap.
 *
 * After it has been loaded, the page must be pinned so that it is not
 * evicted while changes are made to the TLB. It can be unpinned as soon
 * as the TLB is updated.
 */
int
lpage_fault(struct lpage *lp, struct addrspace *as, int faulttype, vaddr_t va)
{
	paddr_t pa;
	int writable;	/* 0 if page is read-only, 1 if page is writable */

	KASSERT(lp != NULL);	/* kernel pages never get paged out, thus never fault */

	lock_acquire(global_paging_lock);
	if ((lp->lp_paddr & PAGE_FRAME) != INVALID_PADDR) {
		lpage_lock_and_pin(lp);
	}
	else {
		lpage_lock(lp);
	}
	lock_release(global_paging_lock);

	KASSERT(lp->lp_swapaddr != INVALID_SWAPADDR);
	pa = lp->lp_paddr;

	/* case 1 - minor fault: the frame is still in memory */
	if ((pa & PAGE_FRAME) != INVALID_PADDR) {
		/* make sure it's a minor fault */
		KASSERT(pa != INVALID_PADDR);

		/* set the TLB entry's dirty bit */
		writable = (faulttype != VM_FAULT_READ);

		/* update stats */
		spinlock_acquire(&stats_spinlock);
		ct_minfaults++;
		DEBUG(DB_VM, "\nlpage_fault: minor faults = %d.", ct_minfaults);
		spinlock_release(&stats_spinlock);
	}
	else {
		/* case 2 - major fault: the frame was swapped out to disk */

		/* make sure it is a major fault */
		KASSERT(pa == INVALID_PADDR);

		/* allocate a new frame */
		lpage_unlock(lp);	/* must not hold lpage locks when entering the coremap */

		pa = coremap_allocuser(lp);	/* evicts if needed; pins the frame */
		if ((pa & PAGE_FRAME) == INVALID_PADDR) {
			DEBUG(DB_VM, "lpage_fault: ENOMEM: va=0x%x\n", va);
			return ENOMEM;
		}
		KASSERT(coremap_pageispinned(pa));

		/* retrieve the contents from disk */
		lock_acquire(global_paging_lock);	/* because swap_pagein needs it */
		swap_pagein(pa & PAGE_FRAME, lp->lp_swapaddr);	/* frame pinned above */
		lpage_lock(lp);
		lock_release(global_paging_lock);

		/* assert that nobody else did the pagein */
		KASSERT((lp->lp_paddr & PAGE_FRAME) == INVALID_PADDR);

		/* now update the PTE with the new PFN */
		lp->lp_paddr = pa;	/* page is clean */

		/* setting the TLB entry's dirty bit */
		writable = 0;	/* this way we can detect the first write to a page */

		/* update stats */
		spinlock_acquire(&stats_spinlock);
		ct_majfaults++;
		DEBUG(DB_VM, "\nlpage_fault: MAJOR faults = %d", ct_majfaults);
		spinlock_release(&stats_spinlock);
	}

	/* check preconditions before updating the TLB/PTE */
	KASSERT(coremap_pageispinned(lp->lp_paddr));
	KASSERT(spinlock_do_i_hold(&lp->lp_spinlock));

	/* the PTE is dirty if the faulting instruction is a write */
	if (writable) {
		LP_SET(lp, LPF_DIRTY);
	}

	/* put the new entry into the TLB */
	KASSERT(coremap_pageispinned(lp->lp_paddr));	/* pinned in both cases above */
	mmu_map(as, va, lp->lp_paddr, writable);	/* updates the TLB and unpins the coremap */

	lpage_unlock(lp);
	return 0;
}
/*
 * lpage_copy: create a new lpage and copy data from another lpage.
 *
 * The synchronization for this is kind of unpleasant. We do it like
 * this:
 *
 *      1. Create newlp.
 *      2. Materialize a page for newlp, so it's locked and pinned.
 *      3. Lock and pin oldlp.
 *      4. Extract the physical address and swap address.
 *      5. If oldlp wasn't present,
 *         5a. Unlock oldlp.
 *         5b. Page in.
 *         5c. This pins the page in the coremap.
 *         5d. Leave the page pinned and relock oldlp.
 *         5e. Assert nobody else paged the page in.
 *      6. Copy.
 *      7. Unlock the lpages first, so we can enter the coremap.
 *      8. Unpin the physical pages.
 */
int
lpage_copy(struct lpage *oldlp, struct lpage **lpret)
{
	struct lpage *newlp;
	paddr_t newpa, oldpa;
	off_t swa;
	int result;

	result = lpage_materialize(&newlp, &newpa);
	if (result) {
		return result;
	}
	KASSERT(coremap_pageispinned(newpa));

	/* Pin the physical page and lock the lpage. */
	lpage_lock_and_pin(oldlp);
	oldpa = oldlp->lp_paddr & PAGE_FRAME;

	/*
	 * If there is no physical page, we allocate one, which pins
	 * it, and then (re)lock the lpage. Since we are single-
	 * threaded (if we weren't, we'd hold the address space lock
	 * to exclude sibling threads) nobody else should have paged
	 * the page in behind our back.
	 */
	if (oldpa == INVALID_PADDR) {
		/*
		 * XXX this is mostly copied from lpage_fault
		 */
		swa = oldlp->lp_swapaddr;
		lpage_unlock(oldlp);
		oldpa = coremap_allocuser(oldlp);
		if (oldpa == INVALID_PADDR) {
			coremap_unpin(newlp->lp_paddr & PAGE_FRAME);
			lpage_destroy(newlp);
			return ENOMEM;
		}
		KASSERT(coremap_pageispinned(oldpa));
		lock_acquire(global_paging_lock);
		swap_pagein(oldpa, swa);
		lpage_lock(oldlp);
		lock_release(global_paging_lock);

		/* Assert nobody else did the pagein. */
		KASSERT((oldlp->lp_paddr & PAGE_FRAME) == INVALID_PADDR);
		oldlp->lp_paddr = oldpa;
	}

	KASSERT(coremap_pageispinned(oldpa));
	coremap_copy_page(oldpa, newpa);

	KASSERT(LP_ISDIRTY(newlp));
	lpage_unlock(oldlp);
	lpage_unlock(newlp);

	coremap_unpin(newpa);
	coremap_unpin(oldpa);

	*lpret = newlp;
	return 0;
}
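/*
 * Usage sketch (hypothetical, not from this file): lpage_copy is the
 * natural building block for duplicating an address space on fork.
 * The page-array arguments are assumptions about the caller's layout.
 */
static int
copy_region_pages(struct lpage **oldpages, struct lpage **newpages,
		  unsigned npages)
{
	unsigned i;
	int result;

	for (i = 0; i < npages; i++) {
		if (oldpages[i] == NULL) {
			newpages[i] = NULL;	/* hole: nothing to copy */
			continue;
		}
		result = lpage_copy(oldpages[i], &newpages[i]);
		if (result) {
			return result;	/* caller unwinds earlier copies */
		}
	}
	return 0;
}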