Example No. 1
/*
 * tlb_invalidate: marks a given tlb entry as invalid.
 *
 * Synchronization: assumes we hold coremap_spinlock. Does not block.
 */
static
void
tlb_invalidate(int tlbix)
{
	uint32_t elo, ehi;
	paddr_t pa;
	unsigned cmix;

	KASSERT(spinlock_do_i_hold(&coremap_spinlock));

	tlb_read(&ehi, &elo, tlbix);
	if (elo & TLBLO_VALID) {
		pa = elo & TLBLO_PPAGE;
		cmix = PADDR_TO_COREMAP(pa);
		KASSERT(cmix < num_coremap_entries);
		KASSERT(coremap[cmix].cm_tlbix == tlbix);
		KASSERT(coremap[cmix].cm_cpunum == curcpu->c_number);
		coremap[cmix].cm_tlbix = -1;
		coremap[cmix].cm_cpunum = 0;
		DEBUG(DB_TLB, "... pa 0x%05lx --> tlb --\n", 
			(unsigned long) COREMAP_TO_PADDR(cmix));
	}

	tlb_write(TLBHI_INVALID(tlbix), TLBLO_INVALID(), tlbix);
	DEBUG(DB_TLB, "... pa ------- <-- tlb %d\n", tlbix);
}
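
Since tlb_invalidate asserts coremap_spinlock, even a one-off invalidation has to be bracketed by that lock. A minimal caller sketch (names as used above; slot is whatever TLB index the caller computed):

/* Sketch only: drop a single TLB slot chosen by the caller. */
spinlock_acquire(&coremap_spinlock);
tlb_invalidate(slot);		/* slot: int, 0 <= slot < NUM_TLB */
spinlock_release(&coremap_spinlock);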
Example No. 2
/*
 * Make a thread runnable.
 *
 * targetcpu might or might not be curcpu.
 */
static
void
thread_make_runnable(struct thread *target, bool already_have_lock)
{
	struct cpu *targetcpu;

	/* Lock the run queue of the target thread's cpu. */
	targetcpu = target->t_cpu;

	if (already_have_lock) {
		/* The target thread's cpu should be already locked. */
		KASSERT(spinlock_do_i_hold(&targetcpu->c_runqueue_lock));
	}
	else {
		spinlock_acquire(&targetcpu->c_runqueue_lock);
	}

	/* Target thread is now ready to run; put it on the run queue. */
	target->t_state = S_READY;
	threadlist_addtail(&targetcpu->c_runqueue, target);

	if (targetcpu->c_isidle) {
		/*
		 * Other processor is idle; send interrupt to make
		 * sure it unidles.
		 */
		ipi_send(targetcpu, IPI_UNIDLE);
	}

	if (!already_have_lock) {
		spinlock_release(&targetcpu->c_runqueue_lock);
	}
}
Example No. 3
/*
 * lpage_zerofill: create a new lpage and arrange for it to be cleared
 * to all zeros. The current implementation causes the lpage to be
 * resident upon return, but this is not a guaranteed property, and
 * nothing prevents the page from being evicted before it is used by
 * the caller.
 *
 * Synchronization: coremap_allocuser returns the new physical page
 * "pinned" (locked) - we hold that lock while we update the page
 * contents and the necessary lpage fields. Unlock the lpage before
 * unpinning, so it's safe to take the coremap spinlock.
 */
int
lpage_zerofill(struct lpage **lpret)
{
	struct lpage *lp;
	paddr_t pa;
	int result;

	result = lpage_materialize(&lp, &pa);
	if (result) {
		return result;
	}
	KASSERT(spinlock_do_i_hold(&lp->lp_spinlock));
	KASSERT(coremap_pageispinned(pa));

	/* Don't actually need the lpage locked. */
	lpage_unlock(lp);

	coremap_zero_page(pa);

	KASSERT(coremap_pageispinned(pa));
	coremap_unpin(pa);

	spinlock_acquire(&stats_spinlock);
	ct_zerofills++;
	spinlock_release(&stats_spinlock);

	*lpret = lp;
	return 0;
}
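
A hedged caller sketch, following the error convention above; the surrounding function and the page-table insertion step are placeholders:

struct lpage *lp;
int result;

result = lpage_zerofill(&lp);
if (result) {
	return result;		/* typically ENOMEM */
}
/* lp comes back unlocked and unpinned; install it promptly, since
   nothing prevents it from being evicted before first use. */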
Example No. 4
/*
 * Wake up all threads sleeping on a wait channel.
 */
void
wchan_wakeall(struct wchan *wc, struct spinlock *lk)
{
	struct thread *target;
	struct threadlist list;

	KASSERT(spinlock_do_i_hold(lk));

	threadlist_init(&list);

	/*
	 * Grab all the threads from the channel, moving them to a
	 * private list.
	 */
	while ((target = threadlist_remhead(&wc->wc_threads)) != NULL) {
		threadlist_addtail(&list, target);
	}

	/*
	 * We could conceivably sort by cpu first to cause fewer lock
	 * ops and fewer IPIs, but for now at least don't bother. Just
	 * make each thread runnable.
	 */
	while ((target = threadlist_remhead(&list)) != NULL) {
		thread_make_runnable(target, false);
	}

	threadlist_cleanup(&list);
}
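
A usage sketch in the style of a broadcast primitive; cv, cv_wchan, and cv_spinlock are assumed names, not part of the code above:

spinlock_acquire(&cv->cv_spinlock);
wchan_wakeall(cv->cv_wchan, &cv->cv_spinlock);
spinlock_release(&cv->cv_spinlock);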
Example No. 5
static
void
checksubpages(void)
{
	struct pageref *pr;
	int i;
	unsigned sc=0, ac=0;

	KASSERT(spinlock_do_i_hold(&kmalloc_spinlock));

	for (i=0; i<NSIZES; i++) {
		for (pr = sizebases[i]; pr != NULL; pr = pr->next_samesize) {
			checksubpage(pr);
			KASSERT(sc < NPAGEREFS);
			sc++;
		}
	}

	for (pr = allbase; pr != NULL; pr = pr->next_all) {
		checksubpage(pr);
		KASSERT(ac < NPAGEREFS);
		ac++;
	}

	KASSERT(sc==ac);
}
Example No. 6
static
void
checksubpage(struct pageref *pr)
{
	vaddr_t prpage, fla;
	struct freelist *fl;
	int blktype;
	int nfree=0;

	KASSERT(spinlock_do_i_hold(&kmalloc_spinlock));

	if (pr->freelist_offset == INVALID_OFFSET) {
		KASSERT(pr->nfree==0);
		return;
	}

	prpage = PR_PAGEADDR(pr);
	blktype = PR_BLOCKTYPE(pr);

	KASSERT(pr->freelist_offset < PAGE_SIZE);
	KASSERT(pr->freelist_offset % sizes[blktype] == 0);

	fla = prpage + pr->freelist_offset;
	fl = (struct freelist *)fla;

	for (; fl != NULL; fl = fl->next) {
		fla = (vaddr_t)fl;
		KASSERT(fla >= prpage && fla < prpage + PAGE_SIZE);
		KASSERT((fla-prpage) % sizes[blktype] == 0);
		KASSERT(fla >= MIPS_KSEG0);
		KASSERT(fla < MIPS_KSEG1);
		nfree++;
	}
	KASSERT(nfree==pr->nfree);
}
Example No. 7
/*
 * Wake up one thread sleeping on a wait channel.
 */
void
wchan_wakeone(struct wchan *wc, struct spinlock *lk)
{
	struct thread *target;

	KASSERT(spinlock_do_i_hold(lk));

	/* Grab a thread from the channel */
	target = threadlist_remhead(&wc->wc_threads);

	if (target == NULL) {
		/* Nobody was sleeping. */
		return;
	}

	/*
	 * Note that thread_make_runnable acquires a runqueue lock
	 * while we're holding LK. This is ok; all spinlocks
	 * associated with wchans must come before the runqueue locks,
	 * as we also bridge from the wchan lock to the runqueue lock
	 * in thread_switch.
	 */

	thread_make_runnable(target, false);
}
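
For context, this is roughly how a counting semaphore's V() would use it, holding the semaphore's own spinlock across the wakeup (sem_lock, sem_wchan, and sem_count are assumed field names):

void
V(struct semaphore *sem)
{
	KASSERT(sem != NULL);

	spinlock_acquire(&sem->sem_lock);
	sem->sem_count++;
	KASSERT(sem->sem_count > 0);
	wchan_wakeone(sem->sem_wchan, &sem->sem_lock);
	spinlock_release(&sem->sem_lock);
}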
Example No. 8
/*
 * Return nonzero if there are no threads sleeping on the channel.
 * This is meant to be used only for diagnostic purposes.
 */
bool
wchan_isempty(struct wchan *wc, struct spinlock *lk)
{
	bool ret;

	KASSERT(spinlock_do_i_hold(lk));
	ret = threadlist_isempty(&wc->wc_threads);

	return ret;
}
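
Being diagnostic-only, it usually appears inside an assertion while the associated spinlock is held; a hypothetical teardown check (sem_* names assumed):

spinlock_acquire(&sem->sem_lock);
KASSERT(wchan_isempty(sem->sem_wchan, &sem->sem_lock));
spinlock_release(&sem->sem_lock);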
Example No. 9
/*
 * tlb_clear: flushes the TLB by loading it with invalid entries.
 *
 * Synchronization: assumes we hold coremap_spinlock. Does not block.
 */
static
void
tlb_clear(void)
{
	int i;	

	KASSERT(spinlock_do_i_hold(&coremap_spinlock));
	for (i=0; i<NUM_TLB; i++) {
		tlb_invalidate(i);
	}
	curcpu->c_vm.cvm_nexttlb = 0;
}
Example No. 10
/*
 * Print the allocated/freed map of a single kernel heap page.
 */
static
void
subpage_stats(struct pageref *pr)
{
	vaddr_t prpage, fla;
	struct freelist *fl;
	int blktype;
	unsigned i, n, index;
	uint32_t freemap[PAGE_SIZE / (SMALLEST_SUBPAGE_SIZE*32)];

	checksubpage(pr);
	KASSERT(spinlock_do_i_hold(&kmalloc_spinlock));

	/* clear freemap[] */
	for (i=0; i<ARRAYCOUNT(freemap); i++) {
		freemap[i] = 0;
	}

	prpage = PR_PAGEADDR(pr);
	blktype = PR_BLOCKTYPE(pr);
	KASSERT(blktype >= 0 && blktype < NSIZES);

	/* compute how many bits we need in freemap and assert we fit */
	n = PAGE_SIZE / sizes[blktype];
	KASSERT(n <= 32 * ARRAYCOUNT(freemap));

	if (pr->freelist_offset != INVALID_OFFSET) {
		fla = prpage + pr->freelist_offset;
		fl = (struct freelist *)fla;

		for (; fl != NULL; fl = fl->next) {
			fla = (vaddr_t)fl;
			index = (fla-prpage) / sizes[blktype];
			KASSERT(index<n);
			freemap[index/32] |= (1<<(index%32));
		}
	}

	kprintf("at 0x%08lx: size %-4lu  %u/%u free\n",
		(unsigned long)prpage, (unsigned long) sizes[blktype],
		(unsigned) pr->nfree, n);
	kprintf("   ");
	for (i=0; i<n; i++) {
		int val = (freemap[i/32] & (1<<(i%32)))!=0;
		kprintf("%c", val ? '.' : '*');
		if (i%64==63 && i<n-1) {
			kprintf("\n   ");
		}
	}
	kprintf("\n");
}
Example No. 11
static
int
piggish_kernel(int proposed_kernel_pages)
{
	uint32_t nkp;

	KASSERT(spinlock_do_i_hold(&coremap_spinlock));

	nkp = num_coremap_kernel + proposed_kernel_pages;
	if (nkp >= num_coremap_entries - CM_MIN_SLACK) {
		return 1;
	}
	return 0;
}
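
A hedged sketch of how a kernel-page allocation path might consult it before committing, refusing the request rather than squeezing out user pages (the surrounding allocator and its failure convention are assumptions):

spinlock_acquire(&coremap_spinlock);
if (piggish_kernel(npages)) {
	/* At most CM_MIN_SLACK entries would remain non-kernel; give up. */
	spinlock_release(&coremap_spinlock);
	return INVALID_PADDR;		/* assumed failure convention */
}
/* ... otherwise find npages free entries and mark them allocated ... */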
Example No. 12
static
void
lser_poll_until_write(struct lser_softc *sc)
{
	uint32_t val;

	KASSERT(spinlock_do_i_hold(&sc->ls_lock));

	do {
		val = bus_read_register(sc->ls_busdata, sc->ls_buspos,
					LSER_REG_WIRQ);
	}
	while ((val & LSER_IRQ_ACTIVE) == 0);
}
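
A hedged sketch of a polled write path built on it: emit a character, then spin until the device reports write completion. LSER_REG_CHAR is an assumed register name, and a real driver would also track its write-busy state:

spinlock_acquire(&sc->ls_lock);
bus_write_register(sc->ls_busdata, sc->ls_buspos, LSER_REG_CHAR, ch);
lser_poll_until_write(sc);	/* spin until LSER_IRQ_ACTIVE shows up in WIRQ */
spinlock_release(&sc->ls_lock);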
Example No. 13
/*
 * Yield the cpu to another process, and go to sleep, on the specified
 * wait channel WC, whose associated spinlock is LK. Calling wakeup on
 * the channel will make the thread runnable again. The spinlock must
 * be locked. The call to thread_switch unlocks it; we relock it
 * before returning.
 */
void
wchan_sleep(struct wchan *wc, struct spinlock *lk)
{
	/* may not sleep in an interrupt handler */
	KASSERT(!curthread->t_in_interrupt);

	/* must hold the spinlock */
	KASSERT(spinlock_do_i_hold(lk));

	/* must not hold other spinlocks */
	KASSERT(curcpu->c_spinlocks == 1);

	thread_switch(S_SLEEP, wc, lk);
	spinlock_acquire(lk);
}
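
The canonical use is the sleep loop of a blocking primitive, re-testing the condition after every wakeup; this is approximately a counting semaphore's P() (sem_* field names assumed):

void
P(struct semaphore *sem)
{
	KASSERT(sem != NULL);
	KASSERT(curthread->t_in_interrupt == false);

	spinlock_acquire(&sem->sem_lock);
	while (sem->sem_count == 0) {
		/* Drops sem_lock while asleep; reacquires it before returning. */
		wchan_sleep(sem->sem_wchan, &sem->sem_lock);
	}
	KASSERT(sem->sem_count > 0);
	sem->sem_count--;
	spinlock_release(&sem->sem_lock);
}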
Example No. 14
bool
get_coremap_spinlock(void)
{
	if (spinlock_do_i_hold(&stealmem_lock)) {
		/* Already held by this CPU; the caller must not release it. */
		return false;
	}

	spinlock_acquire(&stealmem_lock);
	// KASSERT(spinlock_do_i_hold(&stealmem_lock));
	// DEBUG(DB_SWAP, "\n**GL**\n");
	return true;
}
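
The boolean result lets a caller release the lock only if this call actually took it, so nested callers never drop a lock their caller still needs. A hedged usage sketch:

bool acquired = get_coremap_spinlock();
/* ... operate on the coremap / stolen-memory state ... */
if (acquired) {
	spinlock_release(&stealmem_lock);
}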
Example No. 15
/*
 * tlb_replace - TLB replacement algorithm. Returns index of TLB entry
 * to replace.
 *
 * Synchronization: assumes we hold coremap_spinlock. Does not block.
 */
static
uint32_t 
tlb_replace(void) 
{
	KASSERT(spinlock_do_i_hold(&coremap_spinlock));

#if OPT_RANDTLB
	/* random */
	return random() % NUM_TLB;
#else
	/* sequential */
	uint32_t slot = curcpu->c_vm.cvm_tlbseqslot;
	curcpu->c_vm.cvm_tlbseqslot = (slot + 1) % NUM_TLB;
	return slot;
#endif
}
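
A hedged sketch of the surrounding fill path: pick a victim slot, invalidate it, and write the new translation, all while still holding coremap_spinlock (ehi and elo are prepared by the caller):

uint32_t slot;

slot = tlb_replace();
tlb_invalidate(slot);		/* detach any coremap entry still pointing here */
tlb_write(ehi, elo, slot);	/* ehi/elo: TLBHI/TLBLO words built by the caller */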
Example No. 16
void					
coremap_print_short(void)
{
	uint32_t i, atbol=1;

	KASSERT(spinlock_do_i_hold(&coremap_spinlock));
		
	kprintf("Coremap: %u entries, %uk/%uu/%uf\n",
		num_coremap_entries,
		num_coremap_kernel, num_coremap_user, num_coremap_free);

	for (i=0; i<num_coremap_entries; i++) {
		if (atbol) {
			kprintf("0x%x: ", COREMAP_TO_PADDR(i));
			atbol=0;
		}
		if (coremap[i].cm_kernel && coremap[i].cm_notlast) {
			kprintf("k");
		}
		else if (coremap[i].cm_kernel) {
			kprintf("K");
		}
		else if (coremap[i].cm_allocated && coremap[i].cm_pinned) {
			kprintf("&");
		}
		else if (coremap[i].cm_allocated) {
			kprintf("*");
		}
		else {
			kprintf(".");
		}
		if (i%NCOLS==NCOLS-1) {
			kprintf("\n");
			atbol=1;
		}
	}
	if (!atbol) {
		kprintf("\n");
	}
}
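
Because it asserts the coremap spinlock, a debugger or menu command has to wrap the call; a minimal sketch:

spinlock_acquire(&coremap_spinlock);
coremap_print_short();
spinlock_release(&coremap_spinlock);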
Example No. 17
static
int
do_page_replace(void)
{
	int where;

	KASSERT(spinlock_do_i_hold(&coremap_spinlock));
	KASSERT(lock_do_i_hold(global_paging_lock));

	where = page_replace();

	KASSERT(coremap[where].cm_pinned==0);
	KASSERT(coremap[where].cm_kernel==0);

	if (coremap[where].cm_allocated) {
		KASSERT(coremap[where].cm_lpage != NULL);
		KASSERT(curthread != NULL && !curthread->t_in_interrupt);
		do_evict(where);
	}

	return where;
}
Example No. 18
static
void
mark_pages_allocated(int start, int npages, int dopin, int iskern)
{
	int i;

	KASSERT(spinlock_do_i_hold(&coremap_spinlock));
	for (i=start; i<start+npages; i++) {
		KASSERT(coremap[i].cm_pinned==0);
		KASSERT(coremap[i].cm_allocated==0);
		KASSERT(coremap[i].cm_kernel==0);
		KASSERT(coremap[i].cm_lpage==NULL);
		KASSERT(coremap[i].cm_tlbix<0);
		KASSERT(coremap[i].cm_cpunum == 0);

		if (dopin) {
			coremap[i].cm_pinned = 1;
		}
		coremap[i].cm_allocated = 1;
		if (iskern) {
			coremap[i].cm_kernel = 1;
		}

		if (i < start+npages-1) {
			coremap[i].cm_notlast = 1;
		}
	}
	if (iskern) {
		num_coremap_kernel += npages;
	}
	else {
		num_coremap_user += npages;
	}
	num_coremap_free -= npages;
	KASSERT(num_coremap_kernel+num_coremap_user+num_coremap_free
	       == num_coremap_entries);
}
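
A hedged sketch of how the last two helpers might combine on a user-page allocation path: if no free entry was found, evict one, then mark it allocated and pinned before dropping the spinlock (the search itself is elided):

int where;

lock_acquire(global_paging_lock);	/* needed in case we must evict */
spinlock_acquire(&coremap_spinlock);
/* ... if the free-entry search came up empty ... */
where = do_page_replace();
mark_pages_allocated(where, 1 /* npages */, 1 /* dopin */, 0 /* user */);
spinlock_release(&coremap_spinlock);
lock_release(global_paging_lock);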
Example No. 19
/*
 * tlb_unmap: Searches the TLB for a vaddr translation and invalidates
 * it if it exists.
 *
 * Synchronization: assumes we hold coremap_spinlock. Does not block. 
 */
static
void
tlb_unmap(vaddr_t va)
{
	int i;
	uint32_t elo = 0, ehi = 0;

	KASSERT(spinlock_do_i_hold(&coremap_spinlock));

	KASSERT(va < MIPS_KSEG0);

	i = tlb_probe(va & PAGE_FRAME, 0);
	if (i < 0) {
		return;
	}
	
	tlb_read(&ehi, &elo, i);
	
	KASSERT(elo & TLBLO_VALID);
	
	DEBUG(DB_TLB, "invalidating tlb slot %d (va: 0x%x)\n", i, va); 
	
	tlb_invalidate(i);
}
Example No. 20
/*
 * Check that a particular heap page (the one managed by the argument
 * PR) is valid.
 *
 * This checks:
 *    - that the page is within MIPS_KSEG0 (for mips)
 *    - that the freelist starting point in PR is valid
 *    - that the number of free blocks is consistent with the freelist
 *    - that each freelist next pointer points within the page
 *    - that no freelist pointer points to the middle of a block
 *    - that free blocks are still deadbeefed (if CHECKBEEF)
 *    - that the freelist is not circular
 *    - that the guard bands are intact on all allocated blocks (if
 *      CHECKGUARDS)
 *
 * Note that if CHECKGUARDS is set, a circular freelist will cause an
 * assertion as a bit in isfree is set twice; if not, a circular
 * freelist will cause an infinite loop.
 */
static
void
checksubpage(struct pageref *pr)
{
	vaddr_t prpage, fla;
	struct freelist *fl;
	int blktype;
	int nfree=0;
	size_t blocksize;
#ifdef CHECKGUARDS
	const unsigned maxblocks = PAGE_SIZE / SMALLEST_SUBPAGE_SIZE;
	const unsigned numfreewords = DIVROUNDUP(maxblocks, 32);
	uint32_t isfree[numfreewords], mask;
	unsigned numblocks, blocknum, i;
	size_t smallerblocksize;
#endif

	KASSERT(spinlock_do_i_hold(&kmalloc_spinlock));

	if (pr->freelist_offset == INVALID_OFFSET) {
		KASSERT(pr->nfree==0);
		return;
	}

	prpage = PR_PAGEADDR(pr);
	blktype = PR_BLOCKTYPE(pr);
	KASSERT(blktype >= 0 && blktype < NSIZES);
	blocksize = sizes[blktype];

#ifdef CHECKGUARDS
	smallerblocksize = blktype > 0 ? sizes[blktype - 1] : 0;
	for (i=0; i<numfreewords; i++) {
		isfree[i] = 0;
	}
#endif

#ifdef __mips__
	KASSERT(prpage >= MIPS_KSEG0);
	KASSERT(prpage < MIPS_KSEG1);
#endif

	KASSERT(pr->freelist_offset < PAGE_SIZE);
	KASSERT(pr->freelist_offset % blocksize == 0);

	fla = prpage + pr->freelist_offset;
	fl = (struct freelist *)fla;

	for (; fl != NULL; fl = fl->next) {
		fla = (vaddr_t)fl;
		KASSERT(fla >= prpage && fla < prpage + PAGE_SIZE);
		KASSERT((fla-prpage) % blocksize == 0);
#ifdef CHECKBEEF
		checkdeadbeef(fl, blocksize);
#endif
#ifdef CHECKGUARDS
		blocknum = (fla-prpage) / blocksize;
		mask = 1U << (blocknum % 32);
		KASSERT((isfree[blocknum / 32] & mask) == 0);
		isfree[blocknum / 32] |= mask;
#endif
		KASSERT(fl->next != fl);
		nfree++;
	}
	KASSERT(nfree==pr->nfree);

#ifdef CHECKGUARDS
	numblocks = PAGE_SIZE / blocksize;
	for (i=0; i<numblocks; i++) {
		mask = 1U << (i % 32);
		if ((isfree[i / 32] & mask) == 0) {
			checkguardband(prpage + i * blocksize,
				       smallerblocksize, blocksize);
		}
	}
#endif
}
Example No. 21
bool
coremap_spinlock_do_i_hold(void)
{
	return spinlock_do_i_hold(&stealmem_lock);
}
Example No. 22
static
void
do_evict(int where)
{
	struct lpage *lp;

	KASSERT(spinlock_do_i_hold(&coremap_spinlock));
	KASSERT(curthread != NULL && !curthread->t_in_interrupt);
	KASSERT(lock_do_i_hold(global_paging_lock));

	KASSERT(coremap[where].cm_pinned==0);
	KASSERT(coremap[where].cm_allocated);
	KASSERT(coremap[where].cm_kernel==0);

	lp = coremap[where].cm_lpage;
	KASSERT(lp != NULL);

	/*
	 * Pin it now, so it doesn't get e.g. paged out by someone
	 * else while we're waiting for TLB shootdown.
	 */
	coremap[where].cm_pinned = 1;

	if (coremap[where].cm_tlbix >= 0) {
		if (coremap[where].cm_cpunum != curcpu->c_number) {
			/* yay, TLB shootdown */
			struct tlbshootdown ts;
			ts.ts_tlbix = coremap[where].cm_tlbix;
			ts.ts_coremapindex = where;
			ct_shootdowns_sent++;
			ipi_tlbshootdown(coremap[where].cm_cpunum, &ts);
			while (coremap[where].cm_tlbix != -1) {
				tlb_shootwait();
			}
			KASSERT(coremap[where].cm_tlbix == -1);
			KASSERT(coremap[where].cm_cpunum == 0);
			KASSERT(coremap[where].cm_lpage == lp);
		}
		else {
			tlb_invalidate(coremap[where].cm_tlbix);
			coremap[where].cm_tlbix = -1;
			coremap[where].cm_cpunum = 0;
		}
		DEBUG(DB_TLB, "... pa 0x%05lx --> tlb --\n", 
		      (unsigned long) COREMAP_TO_PADDR(where));
	}

	/* properly we ought to lock the lpage to test this */
	KASSERT(COREMAP_TO_PADDR(where) == (lp->lp_paddr & PAGE_FRAME));

	/* release the coremap spinlock in case we need to swap out */
	spinlock_release(&coremap_spinlock);

	lpage_evict(lp);

	spinlock_acquire(&coremap_spinlock);

	/* because the page is pinned these shouldn't have changed */
	KASSERT(coremap[where].cm_allocated == 1);
	KASSERT(coremap[where].cm_lpage == lp);
	KASSERT(coremap[where].cm_pinned == 1);

	coremap[where].cm_allocated = 0;
	coremap[where].cm_lpage = NULL;
	coremap[where].cm_pinned = 0;

	num_coremap_user--;
	num_coremap_free++;
	KASSERT(num_coremap_kernel+num_coremap_user+num_coremap_free
	       == num_coremap_entries);

	wchan_wakeall(coremap_pinchan);
}
Example No. 23
void
lpage_unlock(struct lpage *lp)
{
	KASSERT(spinlock_do_i_hold(&lp->lp_spinlock));
	spinlock_release(&lp->lp_spinlock);
}
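
For symmetry, the matching lock operation is presumably just the acquire side of the same per-page spinlock; a sketch of the assumed counterpart:

void
lpage_lock(struct lpage *lp)
{
	spinlock_acquire(&lp->lp_spinlock);
}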
Example No. 24
/*
 * lpage_fault - handle a fault on a specific lpage. If the page is
 * not resident, get a physical page from coremap and swap it in.
 * 
 * You do not yet need to distinguish a readonly fault from a write
 * fault. When we implement sharing, there will be a difference.
 *
 * Synchronization: Lock the lpage while checking if it's in memory. 
 * If it's not, unlock the page while allocating space and loading the
 * page in. This only works because lpages are not currently sharable.
 * The page should be locked again as soon as it is loaded, but be 
 * careful of interactions with other locks while modifying the coremap.
 *
 * After it has been loaded, the page must be pinned so that it is not
 * evicted while changes are made to the TLB. It can be unpinned as soon
 * as the TLB is updated. 
 */
int
lpage_fault(struct lpage *lp, struct addrspace *as, int faulttype, vaddr_t va)
{
	KASSERT(lp != NULL); // kernel pages never get paged out, thus never fault

	lock_acquire(global_paging_lock);
	if ((lp->lp_paddr & PAGE_FRAME) != INVALID_PADDR) {
		lpage_lock_and_pin(lp);
	} else {
		lpage_lock(lp);
	}
	lock_release(global_paging_lock);

	KASSERT(lp->lp_swapaddr != INVALID_SWAPADDR);

	paddr_t pa = lp->lp_paddr;
	int writable; // 0 if page is read-only, 1 if page is writable

	/* case 1 - minor fault: the frame is still in memory */
	if ((pa & PAGE_FRAME) != INVALID_PADDR) {

		/* make sure it's a minor fault */
		KASSERT(pa != INVALID_PADDR);

		/* Setting the TLB entry's dirty bit */
		writable = (faulttype != VM_FAULT_READ);

		/* update stats */
		spinlock_acquire(&stats_spinlock);
		ct_minfaults++;
		DEBUG(DB_VM, "\nlpage_fault: minor faults = %d.", ct_minfaults);
		spinlock_release(&stats_spinlock);

	} else {
		/* case 2 - major fault: the frame was swapped out to disk */

		/* make sure it is a major fault */
		KASSERT(pa == INVALID_PADDR);

		/* allocate a new frame */
		lpage_unlock(lp); // must not hold lpage locks before entering coremap
		pa = coremap_allocuser(lp); // do evict if needed, also pin coremap
		if ((pa & PAGE_FRAME) == INVALID_PADDR) {
			DEBUG(DB_VM, "lpage_fault: ENOMEM: va=0x%x\n", va);
			return ENOMEM;
		}
		KASSERT(coremap_pageispinned(pa));

		/* retrieving the content from disk */
		lock_acquire(global_paging_lock); // because swap_pagein needs it
		swap_pagein((pa & PAGE_FRAME), lp->lp_swapaddr); // coremap is already pinned above
		lpage_lock(lp);
		lock_release(global_paging_lock);

		/* assert that nobody else did the pagein */
		KASSERT((lp->lp_paddr & PAGE_FRAME) == INVALID_PADDR);

		/* now update PTE with new PFN */
		lp->lp_paddr = pa; // page is clean

		/* Setting the TLB entry's dirty bit */
		writable = 0; // this way we can detect the first write to a page

		/* update stats */
		spinlock_acquire(&stats_spinlock);
		ct_majfaults++;
		DEBUG(DB_VM, "\nlpage_fault: MAJOR faults = %d", ct_majfaults);
		spinlock_release(&stats_spinlock);
	}

	/* check preconditions before updating the TLB/PTE */
	KASSERT(coremap_pageispinned(lp->lp_paddr));
	KASSERT(spinlock_do_i_hold(&lp->lp_spinlock));

	/* PTE entry is dirty if the instruction is a write */
	if (writable) {
		LP_SET(lp, LPF_DIRTY);
	}

	/* Put the new TLB entry into the TLB */
	KASSERT(coremap_pageispinned(lp->lp_paddr)); // done in both cases of above IF clause
	mmu_map(as, va, lp->lp_paddr, writable); // update TLB and unpin coremap
	lpage_unlock(lp);

	return 0;
}