Example #1
/*
 * lpage_materialize: create a new lpage and allocate swap space and a
 * physical page for it. Returns the lpage locked and its page pinned.
 */
static
int
lpage_materialize(struct lpage **lpret, paddr_t *paret)
{
	struct lpage *lp;
	paddr_t pa;
	off_t swa;

	lp = lpage_create();
	if (lp == NULL) {
		return ENOMEM;
	}

	swa = swap_alloc();
	if (swa == INVALID_SWAPADDR) {
		lpage_destroy(lp);
		return ENOSPC;
	}
	lp->lp_swapaddr = swa;

	pa = coremap_allocuser(lp);
	if (pa == INVALID_PADDR) {
		/* lpage_destroy will clean up the swap */
		lpage_destroy(lp);
		return ENOSPC;
	}

	/* Nobody else can see lp yet, but take the lock for the caller. */
	lpage_lock(lp);

	/* No copy in swap matches this page yet, so it starts out dirty. */
	lp->lp_paddr = pa | LPF_DIRTY;

	KASSERT(coremap_pageispinned(pa));

	*lpret = lp;
	*paret = pa;
	return 0;
}
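
For context, a typical caller of lpage_materialize receives the page locked and pinned, uses it, and then releases the lpage lock before the coremap pin (the same ordering as in Example #4). A minimal sketch of such a caller, modeled on a zero-fill path; coremap_zero_page() is assumed here as the zeroing counterpart of the coremap_copy_page() used in Example #4:

/*
 * Sketch only: coremap_zero_page() is assumed to zero a pinned
 * physical page, by analogy with coremap_copy_page().
 */
static
int
lpage_zerofill_sketch(struct lpage **lpret)
{
	struct lpage *lp;
	paddr_t pa;
	int result;

	result = lpage_materialize(&lp, &pa);
	if (result) {
		return result;
	}

	/* The page arrives locked and pinned; zero it in place. */
	KASSERT(coremap_pageispinned(pa));
	coremap_zero_page(pa);

	/* Unlock the lpage first, then release the coremap pin. */
	lpage_unlock(lp);
	coremap_unpin(pa);

	*lpret = lp;
	return 0;
}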
Example #2
File: lpage.c Project: Adam-Koza/A3
/*
 * lpage_fault - handle a fault on a specific lpage. If the page is
 * not resident, get a physical page from coremap and swap it in.
 * 
 * You do not yet need to distinguish a readonly fault from a write
 * fault. When we implement sharing, there will be a difference.
 *
 * Synchronization: Lock the lpage while checking if it's in memory. 
 * If it's not, unlock the page while allocating space and loading the
 * page in. This only works because lpages are not currently sharable.
 * The page should be locked again as soon as it is loaded, but be 
 * careful of interactions with other locks while modifying the coremap.
 *
 * After it has been loaded, the page must be pinned so that it is not
 * evicted while changes are made to the TLB. It can be unpinned as soon
 * as the TLB is updated. 
 */
int
lpage_fault(struct lpage *lp, struct addrspace *as, int faulttype, vaddr_t va)
{
	paddr_t pa;
	off_t swa;

	/* Pin the physical page and lock the lpage. */
	lpage_lock_and_pin(lp);
	/* Get the physical address. */
	pa = lp->lp_paddr & PAGE_FRAME;

	/* If the page is not in memory, page it in from swap. */
	if (pa == INVALID_PADDR) {
		swa = lp->lp_swapaddr;
		lpage_unlock(lp);
		/* Allocate a page frame; it comes back pinned. */
		pa = coremap_allocuser(lp);
		if (pa == INVALID_PADDR) {
			/* Nothing is locked or pinned here; just fail. */
			return ENOMEM;
		}
		KASSERT(coremap_pageispinned(pa));
		lock_acquire(global_paging_lock);
		/* Load the page contents from swap into physical memory. */
		swap_pagein(pa, swa);
		lpage_lock(lp);
		lock_release(global_paging_lock);
		/* Assert nobody else did the pagein. */
		KASSERT((lp->lp_paddr & PAGE_FRAME) == INVALID_PADDR);
		lp->lp_paddr = pa;
	}

	/* Update the TLB. */
	switch (faulttype) {
	case VM_FAULT_READONLY:
	case VM_FAULT_READ:
	case VM_FAULT_WRITE:
		/*
		 * We don't yet distinguish read-only faults from write
		 * faults (see above), so mark the page dirty and map it
		 * writable; remapping a VM_FAULT_READONLY fault read-only
		 * would just fault again forever.
		 */
		LP_SET(lp, LPF_DIRTY);
		mmu_map(as, va, pa, 1);
		break;
	}

	/* mmu_map already unpinned the physical page for us. */
	lpage_unlock(lp);

	return 0;
}
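
For orientation, lpage_fault is reached from the fault handler once the faulting address has been matched to a vm_object. Below is a hedged sketch of that dispatch, reusing vmo_base and vmo_lpages from Example #3; as_objects and the vm_object_array_* accessors are assumed names, not confirmed by the code above:

/*
 * Hypothetical dispatch to lpage_fault. as_objects and the
 * vm_object_array_* helpers are assumed; vmo_base and vmo_lpages
 * match Example #3.
 */
static
int
as_fault_sketch(struct addrspace *as, int faulttype, vaddr_t va)
{
	struct vm_object *vmo;
	struct lpage *lp;
	vaddr_t bot, top;
	unsigned i, index;

	for (i = 0; i < vm_object_array_num(as->as_objects); i++) {
		vmo = vm_object_array_get(as->as_objects, i);
		bot = vmo->vmo_base;
		top = bot + PAGE_SIZE * lpage_array_num(vmo->vmo_lpages);
		if (va < bot || va >= top) {
			continue;
		}
		index = (va - bot) / PAGE_SIZE;
		lp = lpage_array_get(vmo->vmo_lpages, index);
		if (lp == NULL) {
			/* Never-touched page: would be zero-filled here. */
			return EFAULT;
		}
		return lpage_fault(lp, as, faulttype, va);
	}
	return EFAULT;
}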
Example #3
/*
 * vm_object_setsize: change the size of a vm_object. When shrinking,
 * unmap and destroy the pages past the new end (or return the swap
 * reservation for pages that were never materialized). When growing,
 * reserve swap for the new pages and add them as empty entries, to be
 * materialized on first touch.
 */
int
vm_object_setsize(struct addrspace *as, struct vm_object *vmo, unsigned npages)
{
    int result;
    unsigned i;
    struct lpage *lp;

    KASSERT(vmo != NULL);
    KASSERT(vmo->vmo_lpages != NULL);

    if (npages < lpage_array_num(vmo->vmo_lpages)) {
        for (i=npages; i<lpage_array_num(vmo->vmo_lpages); i++) {
            lp = lpage_array_get(vmo->vmo_lpages, i);
            if (lp != NULL) {
                KASSERT(as != NULL);
                /* remove any tlb entry for this mapping */
                mmu_unmap(as, vmo->vmo_base+PAGE_SIZE*i);
                lpage_destroy(lp);
            }
            else {
                /* Never-materialized page: return its swap reservation. */
                swap_unreserve(1);
            }
        }
        result = lpage_array_setsize(vmo->vmo_lpages, npages);
        /* shrinking an array shouldn't fail */
        KASSERT(result==0);
    }
    else if (npages > lpage_array_num(vmo->vmo_lpages)) {
        unsigned oldsize = lpage_array_num(vmo->vmo_lpages);
        unsigned newpages = npages - oldsize;

        result = swap_reserve(newpages);
        if (result) {
            return result;
        }

        result = lpage_array_setsize(vmo->vmo_lpages, npages);
        if (result) {
            swap_unreserve(newpages);
            return result;
        }
        for (i=oldsize; i<npages; i++) {
            lpage_array_set(vmo->vmo_lpages, i, NULL);
        }
    }
    return 0;
}
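
As a usage sketch, growing a heap-style region by one page reduces to a single vm_object_setsize call. Here as_heap is a hypothetical addrspace field, not part of the code above:

/* Sketch only: as_heap is an assumed field naming the heap vm_object. */
static
int
heap_grow_one_page_sketch(struct addrspace *as)
{
	struct vm_object *heap = as->as_heap;
	unsigned cur = lpage_array_num(heap->vmo_lpages);

	/* New entries come in as NULL and are materialized on first fault. */
	return vm_object_setsize(as, heap, cur + 1);
}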
Example #4
/*
 * lpage_copy: create a new lpage and copy data from another lpage.
 *
 * The synchronization for this is kind of unpleasant. We do it like
 * this:
 *
 *      1. Create newlp.
 *      2. Materialize a page for newlp, so it's locked and pinned.
 *      3. Lock and pin oldlp.
 *      4. Extract the physical address and swap address.
 *      5. If oldlp wasn't present,
 *      5a.    Unlock oldlp.
 *      5b.    Page in.
 *      5c.    This pins the page in the coremap.
 *      5d.    Leave the page pinned and relock oldlp.
 *      5e.    Assert nobody else paged the page in.
 *      6. Copy.
 *      7. Unlock the lpages first, so we can enter the coremap.
 *      8. Unpin the physical pages.
 */
int
lpage_copy(struct lpage *oldlp, struct lpage **lpret)
{
	struct lpage *newlp;
	paddr_t newpa, oldpa;
	off_t swa;
	int result;

	result = lpage_materialize(&newlp, &newpa);
	if (result) {
		return result;
	}
	KASSERT(coremap_pageispinned(newpa));

	/* Pin the physical page and lock the lpage. */
	lpage_lock_and_pin(oldlp);
	oldpa = oldlp->lp_paddr & PAGE_FRAME;

	/*
	 * If there is no physical page, we allocate one, which pins
	 * it, and then (re)lock the lpage. Since we are single-
	 * threaded (if we weren't, we'd hold the address space lock
	 * to exclude sibling threads) nobody else should have paged
	 * the page in behind our back.
	 */
	if (oldpa == INVALID_PADDR) {
		/*
		 * XXX this is mostly copied from lpage_fault
		 */
		swa = oldlp->lp_swapaddr;
		lpage_unlock(oldlp);
		oldpa = coremap_allocuser(oldlp);
		if (oldpa == INVALID_PADDR) {
			coremap_unpin(newlp->lp_paddr & PAGE_FRAME);
			lpage_destroy(newlp);
			return ENOMEM;
		}
		KASSERT(coremap_pageispinned(oldpa));
		lock_acquire(global_paging_lock);
		swap_pagein(oldpa, swa);
		lpage_lock(oldlp);
		lock_release(global_paging_lock);
		/* Assert nobody else did the pagein. */
		KASSERT((oldlp->lp_paddr & PAGE_FRAME) == INVALID_PADDR);
		oldlp->lp_paddr = oldpa;
	}

	KASSERT(coremap_pageispinned(oldpa));

	coremap_copy_page(oldpa, newpa);

	KASSERT(LP_ISDIRTY(newlp));

	lpage_unlock(oldlp);
	lpage_unlock(newlp);

	coremap_unpin(newpa);
	coremap_unpin(oldpa);

	*lpret = newlp;
	return 0;
}
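
Finally, a hedged sketch of where lpage_copy fits: an as_copy-style loop that duplicates one vm_object page by page. The helpers are the lpage_array_* accessors from Example #3, and the new object is assumed to have already been created with a matching number of slots:

/*
 * Sketch of per-page duplication (as in an address-space copy path).
 * Assumes newvmo already has the same number of lpage slots as oldvmo.
 */
static
int
vm_object_copy_pages_sketch(struct vm_object *oldvmo, struct vm_object *newvmo)
{
	struct lpage *oldlp, *newlp;
	unsigned i;
	int result;

	for (i = 0; i < lpage_array_num(oldvmo->vmo_lpages); i++) {
		oldlp = lpage_array_get(oldvmo->vmo_lpages, i);
		if (oldlp == NULL) {
			/* Never-materialized page: nothing to copy. */
			continue;
		}
		result = lpage_copy(oldlp, &newlp);
		if (result) {
			return result;
		}
		lpage_array_set(newvmo->vmo_lpages, i, newlp);
	}
	return 0;
}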