Example #1
/*
 * lpage_fault - handle a fault on a specific lpage. If the page is
 * not resident, get a physical page from coremap and swap it in.
 * 
 * You do not yet need to distinguish a readonly fault from a write
 * fault. When we implement sharing, there will be a difference.
 *
 * Synchronization: Lock the lpage while checking if it's in memory. 
 * If it's not, unlock the page while allocating space and loading the
 * page in. This only works because lpages are not currently sharable.
 * The page should be locked again as soon as it is loaded, but be 
 * careful of interactions with other locks while modifying the coremap.
 *
 * After it has been loaded, the page must be pinned so that it is not
 * evicted while changes are made to the TLB. It can be unpinned as soon
 * as the TLB is updated. 
 */
int
lpage_fault(struct lpage *lp, struct addrspace *as, int faulttype, vaddr_t va)
{
	paddr_t pa, swa;

	/* Pin the physical page and lock the lpage. */
	lpage_lock_and_pin(lp);
	// Get the physical address
	pa = lp->lp_paddr & PAGE_FRAME;

	// If the page is not in memory, get it from swap
	if (pa == INVALID_PADDR) {
		swa = lp->lp_swapaddr;
		lpage_unlock(lp);
		// Allocate a physical page frame
		pa = coremap_allocuser(lp);
		if (pa == INVALID_PADDR) {
			coremap_unpin(lp->lp_paddr & PAGE_FRAME);
			lpage_destroy(lp);
			return ENOMEM;
		}
		KASSERT(coremap_pageispinned(pa));
		lock_acquire(global_paging_lock);
		// Copy the page contents in from swap
		swap_pagein(pa, swa);
		lpage_lock(lp);
		lock_release(global_paging_lock);
		/* Assert nobody else did the pagein. */
		KASSERT((lp->lp_paddr & PAGE_FRAME) == INVALID_PADDR);
		lp->lp_paddr = pa;
	}

	// Update TLB
	switch (faulttype) {
	case VM_FAULT_READONLY:
		mmu_map(as, va, pa, 0);
		break;
	case VM_FAULT_READ:
	case VM_FAULT_WRITE:
		// Set it to dirty
		LP_SET(lp, LPF_DIRTY);
		mmu_map(as, va, pa, 1);
	}

	// Already unpinned in mmu_map
	lpage_unlock(lp);

	return 0;
}
Example #2
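/*
 * Initialize the MMU: set up the hardware layer, create the kernel page
 * table, and identity-map the kernel's physical ranges into it.
 */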
mmu_table_t* mmu_init(void) {
	mmu_init_hal();

	/* initialize kernel table */
	mmu_table_t* table = mmu_init_process(1);

	if (table == NULL) {
		return NULL;
	}

	/* kernel mappings */
	int i;
	unsigned int start, end;
	size_t mapped;
	for (i = 0; i < KERNEL_MAPPINGS_SIZE; i += 2) {
		start = kernel_mappings[i];
		end = kernel_mappings[i + 1];

		unsigned int j;
		/* identity-map [start, end); mmu_map reports how much was mapped, 0 on failure */
		for (j = start; j < end; j += mapped) {
			mapped = mmu_map(table, (void*)j, (void*)j);
			if (!mapped) {
				free(table); /* TODO: destroy table */
				return NULL;
			}
		}
	}

	return table;
}
Example #3
/*
 * Terminate specified virtual memory space.
 * This is called when task is terminated.
 */
void
vm_terminate(vm_map_t map)
{
	struct region *reg, *tmp;

	if (--map->refcnt >= 1)
		return;

	sched_lock();
	reg = &map->head;
	do {
		if (reg->flags != REG_FREE) {
			/* Unmap region */
			mmu_map(map->pgd, reg->phys, reg->addr,
				reg->size, PG_UNMAP);

			/* Free the pages if the region is neither shared nor mapped */
			if (!(reg->flags & REG_SHARED) &&
			    !(reg->flags & REG_MAPPED)) {
				page_free(reg->phys, reg->size);
			}
		}
		tmp = reg;
		reg = reg->next;
		region_delete(&map->head, tmp);
	} while (reg != &map->head);

	mmu_delmap(map->pgd);
	kmem_free(map);
	sched_unlock();
}
Example #4
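/*
 * Free the region starting at addr: unmap its pages and release them
 * if the region is neither shared nor mapped.
 */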
static int
do_free(vm_map_t map, void *addr)
{
	struct region *reg;

	addr = (void *)PAGE_TRUNC(addr);

	/*
	 * Find the target region.
	 */
	reg = region_find(&map->head, addr, 1);
	if (reg == NULL || reg->addr != addr || (reg->flags & REG_FREE))
		return EINVAL;

	/*
	 * Unmap pages of the region.
	 */
	mmu_map(map->pgd, reg->phys, reg->addr, reg->size, PG_UNMAP);

	/*
	 * Relinquish use of the pages if the region is neither shared nor mapped.
	 */
	if (!(reg->flags & REG_SHARED) && !(reg->flags & REG_MAPPED))
		page_free(reg->phys, reg->size);

	region_free(&map->head, reg);
	return 0;
}
Example #5
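/*
 * Map a physical range into a task's address space with user-mode
 * access, using the page table selected by index.
 */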
static void maptask(struct task *task, unsigned int index, unsigned int paddress, unsigned int vaddress, unsigned int size)
{

    struct mmu_directory *directory = gettaskdirectory(task->id);
    struct mmu_table *table = (struct mmu_table *)(directory + 1) + index;

    mmu_map(directory, &table[index], paddress, vaddress, size, MMU_TFLAG_PRESENT | MMU_TFLAG_WRITEABLE | MMU_TFLAG_USERMODE, MMU_PFLAG_PRESENT | MMU_PFLAG_WRITEABLE | MMU_PFLAG_USERMODE);

}
Example #6
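/*
 * Map a physical range into the kernel page directory as present and
 * writeable, using the page table selected by index.
 */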
static void mapkernel(unsigned int index, unsigned int paddress, unsigned int vaddress, unsigned int size)
{

    struct mmu_directory *directory = getkerneldirectory();
    struct mmu_table *table = (struct mmu_table *)(directory + 1) + index;

    mmu_map(directory, &table[index], paddress, vaddress, size, MMU_TFLAG_PRESENT | MMU_TFLAG_WRITEABLE, MMU_PFLAG_PRESENT | MMU_PFLAG_WRITEABLE);

}
Example #7
/*
 * lpage_fault - handle a fault on a specific lpage. If the page is
 * not resident, get a physical page from coremap and swap it in.
 * 
 * You do not yet need to distinguish a readonly fault from a write
 * fault. When we implement sharing, there will be a difference.
 *
 * Synchronization: Lock the lpage while checking if it's in memory. 
 * If it's not, unlock the page while allocating space and loading the
 * page in. This only works because lpages are not currently sharable.
 * The page should be locked again as soon as it is loaded, but be 
 * careful of interactions with other locks while modifying the coremap.
 *
 * After it has been loaded, the page must be pinned so that it is not
 * evicted while changes are made to the TLB. It can be unpinned as soon
 * as the TLB is updated. 
 */
int
lpage_fault(struct lpage *lp, struct addrspace *as, int faulttype, vaddr_t va)
{
	
	paddr_t pa;
	off_t swap;

	int writable = 0;

	//lock the page and pin the frame before inspecting it
	lpage_lock_and_pin(lp);
	pa = lp->lp_paddr & PAGE_FRAME;
	swap = lp->lp_swapaddr;

	//If the page is not in RAM, load into RAM
	if(pa == INVALID_PADDR) {
		//unlock the page while we allocate a frame and page it in
		lpage_unlock(lp);

		//allocate a page and pin it
		pa = coremap_allocuser(lp);
		if(pa == INVALID_PADDR) {
			coremap_unpin(lp->lp_paddr & PAGE_FRAME);
			return ENOMEM;
		}

		//assert the page is pinned and lock
		KASSERT(coremap_pageispinned(pa));
		lock_acquire(global_paging_lock);

		//fetch from disk and put in RAM
		swap_pagein(pa, swap);

		//release locks
		lpage_lock(lp);
		lock_release(global_paging_lock);

		//make sure nobody else paged in the page
		KASSERT((lp->lp_paddr & PAGE_FRAME) == INVALID_PADDR);

		//set the page's new physical address
		lp->lp_paddr = pa;
	}

	if(faulttype == VM_FAULT_WRITE || faulttype == VM_FAULT_READONLY) {
		LP_SET(lp, LPF_DIRTY);
		writable = 1;
	}

	//put a mapping into the TLB
	/*if(coremap_pageispinned(lp->lp_paddr) == 0) {
		DEBUG(DB_VM, "Page is unpinned!");
	}*/
	mmu_map(as, va, pa, writable);
	lpage_unlock(lp);

	return 0;
}
Example #8
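/*
 * Allocate a region of the given size, back it with physical pages,
 * map it writable and zero-fill it. If anywhere is not set, the region
 * is placed at the page-aligned *addr.
 */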
static int
do_allocate(vm_map_t map, void **addr, size_t size, int anywhere)
{
	struct region *reg;
	char *start, *end, *phys;

	if (size == 0)
		return EINVAL;

	/*
	 * Allocate region
	 */
	if (anywhere) {
		size = (size_t)PAGE_ALIGN(size);
		if ((reg = region_alloc(&map->head, size)) == NULL)
			return ENOMEM;
	} else {
		start = (char *)PAGE_TRUNC(*addr);
		end = (char *)PAGE_ALIGN(start + size);
		size = (size_t)(end - start);

		reg = region_find(&map->head, start, size);
		if (reg == NULL || !(reg->flags & REG_FREE))
			return EINVAL;

		reg = region_split(&map->head, reg, start, size);
		if (reg == NULL)
			return ENOMEM;
	}
	reg->flags = REG_READ | REG_WRITE;

	/*
	 * Allocate physical pages, and map them into virtual address
	 */
	if ((phys = page_alloc(size)) == 0)
		goto err1;

	if (mmu_map(map->pgd, phys, reg->addr, size, PG_WRITE))
		goto err2;

	reg->phys = phys;

	/* Zero fill */
	memset(phys_to_virt(phys), 0, reg->size);
	*addr = reg->addr;
	return 0;

 err2:
	page_free(phys, size);
 err1:
	region_free(&map->head, reg);
	return ENOMEM;
}
Example #9
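/*
 * Map pgnr pages at isp_virt to phys by forwarding the request to
 * mmu_map.
 */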
int isp_mmu_map(struct isp_mmu *mmu, unsigned int isp_virt,
		phys_addr_t phys, unsigned int pgnr)
{
	return mmu_map(mmu, isp_virt, phys, pgnr);
}
Example #10
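/*
 * Duplicate org_map for a forked task: regions that are read-only and
 * not mapped are shared; all others are copied into new physical pages.
 */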
static vm_map_t
do_fork(vm_map_t org_map)
{
	vm_map_t new_map;
	struct region *tmp, *src, *dest;
	int map_type;

	if ((new_map = vm_create()) == NULL)
		return NULL;
	/*
	 * Copy all regions
	 */
	tmp = &new_map->head;
	src = &org_map->head;

	/*
	 * Copy top region
	 */
	*tmp = *src;
	tmp->next = tmp->prev = tmp;

	if (src == src->next)	/* Blank memory ? */
		return new_map;

	do {
		ASSERT(src != NULL);
		ASSERT(src->next != NULL);

		if (src == &org_map->head) {
			dest = tmp;
		} else {
			/* Create new region struct */
			dest = kmem_alloc(sizeof(*dest));
			if (dest == NULL)
				return NULL;

			*dest = *src;	/* memcpy */

			dest->prev = tmp;
			dest->next = tmp->next;
			tmp->next->prev = dest;
			tmp->next = dest;
			tmp = dest;
		}
		if (src->flags == REG_FREE) {
			/*
			 * Skip free region
			 */
		} else {
			/* Check if the region can be shared */
			if (!(src->flags & REG_WRITE) &&
			    !(src->flags & REG_MAPPED)) {
				dest->flags |= REG_SHARED;
			}

			if (!(dest->flags & REG_SHARED)) {
				/* Allocate new physical page. */
				dest->phys = page_alloc(src->size);
				if (dest->phys == 0)
					return NULL;

				/* Copy source page */
				memcpy(phys_to_virt(dest->phys),
				       phys_to_virt(src->phys), src->size);
			}
			/* Map the region to virtual address */
			if (dest->flags & REG_WRITE)
				map_type = PG_WRITE;
			else
				map_type = PG_READ;

			if (mmu_map(new_map->pgd, dest->phys, dest->addr,
				    dest->size, map_type))
				return NULL;
		}
		src = src->next;
	} while (src != &org_map->head);

	/*
	 * No error. Now, link all shared regions
	 */
	dest = &new_map->head;
	src = &org_map->head;
	do {
		if (dest->flags & REG_SHARED) {
			src->flags |= REG_SHARED;
			dest->sh_prev = src;
			dest->sh_next = src->sh_next;
			src->sh_next->sh_prev = dest;
			src->sh_next = dest;
		}
		dest = dest->next;
		src = src->next;
	} while (src != &org_map->head);
	return new_map;
}
Example #11
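/*
 * Map a region of the given map into the current task's address space
 * and return the resulting address through alloc.
 */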
static int
do_map(vm_map_t map, void *addr, size_t size, void **alloc)
{
	vm_map_t curmap;
	char *start, *end, *phys;
	size_t offset;
	struct region *reg, *cur, *tgt;
	task_t self;
	int map_type;
	void *tmp;

	if (size == 0)
		return EINVAL;

	/* check fault */
	tmp = NULL;
	if (umem_copyout(&tmp, alloc, sizeof(tmp)))
		return EFAULT;

	start = (char *)PAGE_TRUNC(addr);
	end = (char *)PAGE_ALIGN((char *)addr + size);
	size = (size_t)(end - start);
	offset = (size_t)((char *)addr - start);

	/*
	 * Find the region that includes target address
	 */
	reg = region_find(&map->head, start, size);
	if (reg == NULL || (reg->flags & REG_FREE))
		return EINVAL;	/* not allocated */
	tgt = reg;

	/*
	 * Find the free region in current task
	 */
	self = cur_task();
	curmap = self->map;
	if ((reg = region_alloc(&curmap->head, size)) == NULL)
		return ENOMEM;
	cur = reg;

	/*
	 * Try to map into current memory
	 */
	if (tgt->flags & REG_WRITE)
		map_type = PG_WRITE;
	else
		map_type = PG_READ;

	phys = (char *)tgt->phys + (start - (char *)tgt->addr);
	if (mmu_map(curmap->pgd, phys, cur->addr, size, map_type)) {
		region_free(&curmap->head, reg);
		return ENOMEM;
	}

	cur->flags = tgt->flags | REG_MAPPED;
	cur->phys = phys;

	tmp = (char *)cur->addr + offset;
	umem_copyout(&tmp, alloc, sizeof(tmp));
	return 0;
}
Example #12
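/*
 * Change the read/write attribute of a region. A shared region is first
 * duplicated into private pages so the change does not affect the other
 * sharers.
 */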
static int
do_attribute(vm_map_t map, void *addr, int attr)
{
	struct region *reg;
	int new_flags = 0;
	void *old_addr, *new_addr = NULL;
	int map_type;

	addr = (void *)PAGE_TRUNC(addr);

	/*
	 * Find the target region.
	 */
	reg = region_find(&map->head, addr, 1);
	if (reg == NULL || reg->addr != addr || (reg->flags & REG_FREE)) {
		return EINVAL;	/* not allocated */
	}
	/*
	 * The attribute of the mapped region can not be changed.
	 */
	if (reg->flags & REG_MAPPED)
		return EINVAL;

	/*
	 * Check new and old flag.
	 */
	if (reg->flags & REG_WRITE) {
		if (!(attr & VMA_WRITE))
			new_flags = REG_READ;
	} else {
		if (attr & VMA_WRITE)
			new_flags = REG_READ | REG_WRITE;
	}
	if (new_flags == 0)
		return 0;	/* same attribute */

	map_type = (new_flags & REG_WRITE) ? PG_WRITE : PG_READ;

	/*
	 * If it is shared region, duplicate it.
	 */
	if (reg->flags & REG_SHARED) {

		old_addr = reg->phys;

		/* Allocate new physical page. */
		if ((new_addr = page_alloc(reg->size)) == 0)
			return ENOMEM;

		/* Copy source page */
		memcpy(phys_to_virt(new_addr), phys_to_virt(old_addr),
		       reg->size);

		/* Map new region */
		if (mmu_map(map->pgd, new_addr, reg->addr, reg->size,
			    map_type)) {
			page_free(new_addr, reg->size);
			return ENOMEM;
		}
		reg->phys = new_addr;

		/* Unlink from shared list */
		reg->sh_prev->sh_next = reg->sh_next;
		reg->sh_next->sh_prev = reg->sh_prev;
		if (reg->sh_prev == reg->sh_next)
			reg->sh_prev->flags &= ~REG_SHARED;
		reg->sh_next = reg->sh_prev = reg;
	} else {
		if (mmu_map(map->pgd, reg->phys, reg->addr, reg->size,
			    map_type))
			return ENOMEM;
	}
	reg->flags = new_flags;
	return 0;
}
Example #13
/*
 * lpage_fault - handle a fault on a specific lpage. If the page is
 * not resident, get a physical page from coremap and swap it in.
 * 
 * You do not yet need to distinguish a readonly fault from a write
 * fault. When we implement sharing, there will be a difference.
 *
 * Synchronization: Lock the lpage while checking if it's in memory. 
 * If it's not, unlock the page while allocating space and loading the
 * page in. This only works because lpages are not currently sharable.
 * The page should be locked again as soon as it is loaded, but be 
 * careful of interactions with other locks while modifying the coremap.
 *
 * After it has been loaded, the page must be pinned so that it is not
 * evicted while changes are made to the TLB. It can be unpinned as soon
 * as the TLB is updated. 
 */
int
lpage_fault(struct lpage *lp, struct addrspace *as, int faulttype, vaddr_t va)
{
	KASSERT(lp != NULL); // kernel pages never get paged out, thus never fault

	lock_acquire(global_paging_lock);
	if ((lp->lp_paddr & PAGE_FRAME) != INVALID_PADDR) {
		lpage_lock_and_pin(lp);
	} else {
		lpage_lock(lp);
	}
	lock_release(global_paging_lock);

	KASSERT(lp->lp_swapaddr != INVALID_SWAPADDR);

	paddr_t pa = lp->lp_paddr;
	int writable; // 0 if page is read-only, 1 if page is writable

	/* case 1 - minor fault: the frame is still in memory */
	if ((pa & PAGE_FRAME) != INVALID_PADDR) {

		/* make sure it's a minor fault */
		KASSERT(pa != INVALID_PADDR);

		/* Setting the TLB entry's dirty bit */
		writable = (faulttype != VM_FAULT_READ);

		/* update stats */
		spinlock_acquire(&stats_spinlock);
		ct_minfaults++;
		DEBUG(DB_VM, "\nlpage_fault: minor faults = %d.", ct_minfaults);
		spinlock_release(&stats_spinlock);

	} else {
		/* case 2 - major fault: the frame was swapped out to disk */

		/* make sure it is a major fault */
		KASSERT(pa == INVALID_PADDR);

		/* allocate a new frame */
		lpage_unlock(lp); // must not hold lpage locks before entering coremap
		pa = coremap_allocuser(lp); // do evict if needed, also pin coremap
		if ((pa & PAGE_FRAME) == INVALID_PADDR) {
			DEBUG(DB_VM, "lpage_fault: ENOMEM: va=0x%x\n", va);
			return ENOMEM;
		}
		KASSERT(coremap_pageispinned(pa));

		/* retrieving the content from disk */
		lock_acquire(global_paging_lock); // because swap_pagein needs it
		swap_pagein((pa & PAGE_FRAME), lp->lp_swapaddr); // coremap is already pinned above
		lpage_lock(lp);
		lock_release(global_paging_lock);

		/* assert that nobody else did the pagein */
		KASSERT((lp->lp_paddr & PAGE_FRAME) == INVALID_PADDR);

		/* now update PTE with new PFN */
		lp->lp_paddr = pa; // page is clean

		/* Setting the TLB entry's dirty bit */
		writable = 0; // this way we can detect the first write to a page

		/* update stats */
		spinlock_acquire(&stats_spinlock);
		ct_majfaults++;
		DEBUG(DB_VM, "\nlpage_fault: MAJOR faults = %d", ct_majfaults);
		spinlock_release(&stats_spinlock);
	}

	/* check preconditions before update TLB/PTE */
	KASSERT(coremap_pageispinned(lp->lp_paddr));
	KASSERT(spinlock_do_i_hold(&lp->lp_spinlock));

	/* PTE entry is dirty if the instruction is a write */
	if (writable) {
		LP_SET(lp, LPF_DIRTY);
	}

	/* Put the new TLB entry into the TLB */
	KASSERT(coremap_pageispinned(lp->lp_paddr)); // done in both cases of above IF clause
	mmu_map(as, va, lp->lp_paddr, writable); // update TLB and unpin coremap
	lpage_unlock(lp);

	return 0;
}