Example #1
envid_t
dumbfork(void)
{
	envid_t envid;
	uint8_t *addr;
	int r;
	extern unsigned char end[];

	// Allocate a new child environment.
	// The kernel will initialize it with a copy of our register state,
	// so that the child will appear to have called sys_exofork() too -
	// except that in the child, this "fake" call to sys_exofork()
	// will return 0 instead of the envid of the child.
	envid = sys_exofork();
	if (envid < 0)
		panic("sys_exofork: %e", envid);
	if (envid == 0) {
		// We're the child.
		// The copied value of the global variable 'thisenv'
		// is no longer valid (it refers to the parent!).
		// Fix it and return 0.
		cprintf("reaching in child....\n");
		thisenv = &envs[ENVX(sys_getenvid())];
		return 0;
	}

	// We're the parent.
	// Eagerly copy our entire address space into the child.
	// This is NOT what you should do in your fork implementation.
	for (addr = (uint8_t*) UTEXT; addr < end; addr += PGSIZE)
		duppage(envid, addr);

	// Also copy the stack we are currently running on.
	duppage(envid, ROUNDDOWN(&addr, PGSIZE));

	// Start the child environment running
	if ((r = sys_env_set_status(envid, ENV_RUNNABLE)) < 0)
		panic("sys_env_set_status: %e", r);

	return envid;
}
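
Example #1 calls a duppage helper; for reference, a minimal sketch of that helper in the style of the JOS lab skeleton (sys_page_alloc/sys_page_map/sys_page_unmap and the UTEMP staging address are the standard JOS user-library API; the panic messages are illustrative):

// Copy one page into the child: allocate it in the child, alias it
// into our own space at UTEMP, copy the bytes, then drop the alias.
void
duppage(envid_t dstenv, void *addr)
{
	int r;

	if ((r = sys_page_alloc(dstenv, addr, PTE_P|PTE_U|PTE_W)) < 0)
		panic("sys_page_alloc: %e", r);
	if ((r = sys_page_map(dstenv, addr, 0, UTEMP, PTE_P|PTE_U|PTE_W)) < 0)
		panic("sys_page_map: %e", r);
	memmove(UTEMP, addr, PGSIZE);
	if ((r = sys_page_unmap(0, UTEMP)) < 0)
		panic("sys_page_unmap: %e", r);
}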
Example #2
// Flush the contents of the block containing VA out to disk if
// necessary, then clear the PTE_D bit using sys_page_map.
// If the block is not in the block cache or is not dirty, does
// nothing.
// Hint: Use va_is_mapped, va_is_dirty, and ide_write.
// Hint: Use the PTE_SYSCALL constant when calling sys_page_map.
// Hint: Don't forget to round addr down.
void
flush_block(void *addr)
{
	uint64_t blockno = ((uint64_t)addr - DISKMAP) / BLKSIZE;
	int r;

	if (addr < (void*)DISKMAP || addr >= (void*)(DISKMAP + DISKSIZE))
		panic("flush_block of bad va %08x", addr);

	// LAB 5: Your code here.
	void *dst_addr = (void *) ROUNDDOWN(addr, PGSIZE);
	// Project addition --> transparent disk encryption.
	if (va_is_mapped(dst_addr) && va_is_dirty(dst_addr)) {
		// Blocks 0 and 2 are always written in the clear, and so is
		// block 1 until encryption is enabled; everything else goes
		// through the encryption path.
		if (blockno == 0 || blockno == 2 ||
		    (blockno == 1 && !s_encrypted)) {
			ide_write(blockno * BLKSECTS, dst_addr, BLKSECTS);
		} else {
			r = transparent_disk_encrypt(blockno, dst_addr);
			if (r)
				return;
		}
		// Remap the page onto itself to clear its PTE_D bit.
		if ((r = sys_page_map(thisenv->env_id, dst_addr,
				      thisenv->env_id, dst_addr, PTE_SYSCALL)) < 0)
			panic("flush_block: sys_page_map: %e", r);
	}

//	panic("flush_block not implemented");
}
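
The hints above reference va_is_mapped and va_is_dirty; a sketch of these predicates as they are commonly defined in JOS fs/bc.c, assuming the read-only uvpd/uvpt page-table mappings:

// Is this virtual address mapped?
bool
va_is_mapped(void *va)
{
	return (uvpd[PDX(va)] & PTE_P) && (uvpt[PGNUM(va)] & PTE_P);
}

// Is this virtual address dirty?
bool
va_is_dirty(void *va)
{
	return (uvpt[PGNUM(va)] & PTE_D) != 0;
}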
Example #3
//
// Custom page fault handler - if faulting page is copy-on-write,
// map in our own private writable copy.
//
static void
pgfault(struct UTrapframe *utf)
{
	void *addr = (void *) utf->utf_fault_va;
	uint32_t err = utf->utf_err;
	int r;
	// Check that the faulting access was (1) a write, and (2) to a
	// copy-on-write page.  If not, panic.
	// Hint:
	//   Use the read-only page table mappings at vpt
	//   (see <inc/memlayout.h>).
	//cprintf("pgfault: do page fault here %x\n",utf->utf_eflags);
	// LAB 4: Your code here.
	if((err & FEC_WR) == 0)
		panic("pgfault: fault is not a write (err: %08x va: %08x ip: %08x)",err, addr, utf->utf_eip);
	if ((vpd[PDX(addr)] & PTE_P) == 0 || (vpt[PGNUM(addr)] & PTE_COW) == 0)
		panic("pgfault: page not present or not copy-on-write");
	// Allocate a new page, map it at a temporary location (PFTEMP),
	// copy the data from the old page to the new page, then move the new
	// page to the old page's address.
	// Hint:
	//   You should make three system calls.
	//   No need to explicitly delete the old page's mapping.

	// LAB 4: Your code here.
	if ((r = sys_page_alloc (0, (void *)PFTEMP, PTE_U|PTE_P|PTE_W)) < 0)
		panic ("pgfault: page allocation failed : %e", r);
	addr = ROUNDDOWN (addr, PGSIZE);
	memmove (PFTEMP, addr, PGSIZE);
	if((r = sys_page_map (0, PFTEMP, 0, addr, PTE_U|PTE_P|PTE_W)) < 0)
		panic ("pgfault: page mapping failed : %e", r);
	if((r = sys_page_unmap(0,PFTEMP)) < 0)
		panic("pgfault: page unmapping failed : %e", r);
	//cprintf("pgfault: finish\n");
	/* int gaga = 0; */
	/* __asm__ volatile("movl %%esp, %0\n\t" */
	/* 		 :"=r"(gaga) */
	/* 		 ::); */
	/* cprintf("gaga----------%x\n", gaga); */
	//panic("pgfault not implemented");
}
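
A handler like this only runs after it has been registered. In JOS that happens once, before forking; a minimal sketch assuming the standard lib/pgfault.c interface (the function name fork_prepare is hypothetical):

// Install the COW fault handler; set_pgfault_handler also allocates
// the user exception stack on first use.
static void
fork_prepare(void)
{
	set_pgfault_handler(pgfault);
}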
Example #4
// check_pgfault - check correctness of pgfault handler
static void
check_pgfault(void) {
    size_t nr_free_pages_store = nr_free_pages();

    check_mm_struct = mm_create();
    assert(check_mm_struct != NULL);

    struct mm_struct *mm = check_mm_struct;
    pde_t *pgdir = mm->pgdir = boot_pgdir;
    assert(pgdir[0] == 0);

    struct vma_struct *vma = vma_create(0, PTSIZE, VM_WRITE);
    assert(vma != NULL);

    insert_vma_struct(mm, vma);

    uintptr_t addr = 0x100;
    assert(find_vma(mm, addr) == vma);

    int i, sum = 0;
    for (i = 0; i < 100; i ++) {
        *(char *)(addr + i) = i;
        sum += i;
    }
    for (i = 0; i < 100; i ++) {
        sum -= *(char *)(addr + i);
    }
    assert(sum == 0);

    page_remove(pgdir, ROUNDDOWN(addr, PGSIZE));
    free_page(pde2page(pgdir[0]));
    pgdir[0] = 0;

    mm->pgdir = NULL;
    mm_destroy(mm);
    check_mm_struct = NULL;

    assert(nr_free_pages_store == nr_free_pages());

    cprintf("check_pgfault() succeeded!\n");
}
Example #5
// Copies exactly 1024 bytes between 'buf' and the page containing
// 'addr' in environment 'env_id'; called by both server and client.
int
sys_copy_mem(envid_t env_id, void* addr, void* buf, int perm, bool frombuf)
{
	void *pgva = (void *) ROUNDDOWN(addr, PGSIZE);

	if (sys_page_map(env_id, pgva, curenv->env_id, (void *) UTEMP, 
			 perm) < 0) 
		return -E_INVAL;

	if (frombuf) {
		memmove((void *) (UTEMP + PGOFF(addr)), buf, 1024);
	}
	else {
		memmove(buf, (void *) (UTEMP + PGOFF(addr)), 1024);
	}

	if (sys_page_unmap(curenv->env_id, (void *) UTEMP) < 0)
		return -E_INVAL;

	return 0;
}
Example #6
UINT32
AlignMemoryRange (
  IN UINT32 Addr,
  IN OUT UINTN *Size,
  OUT UINTN  *AddrOffset,
  IN UINTN Alignment
)
{
  // align range
  UINT32 AddrAligned = ROUNDDOWN(Addr, Alignment);

  // calculate offset
  UINTN Offset = Addr - AddrAligned;
  if (AddrOffset != NULL)
    *AddrOffset = Offset;

  // round and return size
  *Size = ROUNDUP(Offset + (*Size), Alignment);

  return AddrAligned;
}
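
A hypothetical call site showing how the three outputs fit together (the constants are illustrative; EFI_PAGE_SIZE is the usual 0x1000):

// Align an unaligned range to page granularity.
UINTN  Size   = 0x123;     // unaligned length
UINTN  Offset = 0;
UINT32 Base   = AlignMemoryRange(0x80001234, &Size, &Offset, EFI_PAGE_SIZE);
// Now Base == 0x80001000, Offset == 0x234, and Size has been rounded
// up to 0x1000; callers access their data at Base + Offset.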
Example #7
File: bc.c Project: wuxy/mitjos
// Flush the contents of the block containing VA out to disk if
// necessary, then clear the PTE_D bit using sys_page_map.
// If the block is not in the block cache or is not dirty, does
// nothing.
// Hint: Use va_is_mapped, va_is_dirty, and ide_write.
// Hint: Use the PTE_USER constant when calling sys_page_map.
// Hint: Don't forget to round addr down.
void
flush_block(void *addr)
{
	uint32_t blockno = ((uint32_t)addr - DISKMAP) / BLKSIZE;

	if (addr < (void*)DISKMAP || addr >= (void*)(DISKMAP + DISKSIZE))
		panic("flush_block of bad va %08x", addr);

	// LAB 5: Your code here.
	int r;
	void *blkva = ROUNDDOWN(addr, BLKSIZE);
	if (va_is_mapped(addr) && va_is_dirty(addr)) {
		ide_write(blockno * BLKSECTS, blkva, BLKSECTS);
		if ((r = sys_page_map(0, blkva, 0, blkva, PTE_USER)) < 0)
			panic("page mapping failed: %e\n", r);
	}
	//panic("flush_block not implemented");
}
Example #8
// Flush the contents of the block containing VA out to disk if
// necessary, then clear the PTE_D bit using sys_page_map.
// If the block is not in the block cache or is not dirty, does
// nothing.
// Hint: Use va_is_mapped, va_is_dirty, and ide_write.
// Hint: Use the PTE_SYSCALL constant when calling sys_page_map.
// Hint: Don't forget to round addr down.
void
flush_block(void *addr)
{
	uint32_t blockno = ((uint32_t)addr - DISKMAP) / BLKSIZE;

	if (addr < (void*)DISKMAP || addr >= (void*)(DISKMAP + DISKSIZE))
		panic("flush_block of bad va %08x", addr);

	// LAB 5: Your code here.
	if( !va_is_mapped(addr) || !(uvpt[PGNUM(addr)] & PTE_D)) { /* no need to flush */
		return;
	}
	int r;
	addr = ROUNDDOWN(addr, PGSIZE);
	if( (r = ide_write(blockno * BLKSECTS, addr, (PGSIZE/SECTSIZE))) != 0) {
		panic("in flush_block, ide_write: %e", r);
	}

	if ((r = sys_page_map(0, addr, 0, addr, uvpt[PGNUM(addr)] & PTE_SYSCALL)) < 0)
		panic("in sys_page_map, sys_page_map: %e", r);
}
Example #9
static void
unmap_range_pud(pgd_t *pgdir, pud_t *pud, uintptr_t base, uintptr_t start, uintptr_t end) {
#if PUXSHIFT == PGXSHIFT
    unmap_range_pmd(pgdir, pud, base, start, end);
#else
    assert(start >= 0 && start < end && end <= PUSIZE);
    size_t off, size;
    uintptr_t la = ROUNDDOWN(start, PMSIZE);
    do {
        off = start - la, size = PMSIZE - off;
        if (size > end - start) {
            size = end - start;
        }
        pud_t *pudp = &pud[PUX(la)];
        if (ptep_present(pudp)) {
            unmap_range_pmd(pgdir, KADDR(PUD_ADDR(*pudp)), base + la, off, off + size);
        }
        start += size, la += PMSIZE;
    } while (start != 0 && start < end);
#endif
}
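
Every example on this page leans on the same two macros. For reference, the 32-bit JOS definitions (ucore's are analogous); the statement-expression form evaluates the argument once and preserves its type, and the modulo form works for any n, not just powers of two:

// Round down to the nearest multiple of n.
#define ROUNDDOWN(a, n)						\
({								\
	uint32_t __a = (uint32_t) (a);				\
	(typeof(a)) (__a - __a % (n));				\
})

// Round up to the nearest multiple of n.
#define ROUNDUP(a, n)						\
({								\
	uint32_t __n = (uint32_t) (n);				\
	(typeof(a)) (ROUNDDOWN((uint32_t) (a) + __n - 1, __n));	\
})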
Example #10
//
// Allocate len bytes of physical memory for environment env,
// and map it at virtual address va in the environment's address space.
// Does not zero or otherwise initialize the mapped pages in any way.
// Pages should be writable by user and kernel.
// Panic if any allocation attempt fails.
//
static void
segment_alloc(struct Env *e, void *va, size_t len)
{
	// LAB 3: Your code here.
	// (But only if you need it for load_icode.)
	//
	// Hint: It is easier to use segment_alloc if the caller can pass
	//   'va' and 'len' values that are not page-aligned.
	//   You should round va down, and round len up.
    	
	// DEC 7,2010,sunus
	uint32_t align_va = ROUNDDOWN((uint32_t)va, PGSIZE);
	size_t i, align_len = ROUNDUP(len, PGSIZE);
	struct Page *pmem;
	for (i = 0; i < align_len; i += PGSIZE) {
		assert(page_alloc(&pmem) == 0);
		// Map at the rounded-down address, not the raw 'va'.
		assert(page_insert(e->env_pgdir, pmem, (void *)(align_va + i), PTE_W|PTE_U) == 0);
	}
}
Example #11
//
// Check that an environment is allowed to access the range of memory
// [va, va+len) with permissions 'perm | PTE_P'.
// Normally 'perm' will contain PTE_U at least, but this is not required.
// 'va' and 'len' need not be page-aligned; you must test every page that
// contains any of that range.  You will test either 'len/PGSIZE',
// 'len/PGSIZE + 1', or 'len/PGSIZE + 2' pages.
//
// A user program can access a virtual address if (1) the address is below
// ULIM, and (2) the page table gives it permission.  These are exactly
// the tests you should implement here.
//
// If there is an error, set the 'user_mem_check_addr' variable to the first
// erroneous virtual address.
//
// Returns 0 if the user program can access this range of addresses,
// and -E_FAULT otherwise.
//
int
user_mem_check(struct Env *env, const void *va, size_t len, int perm)
{

	// LAB 3: Your code here.
	struct PageInfo *pg;
	pte_t *pte;
	uintptr_t end = ROUNDUP((uintptr_t)va + len, PGSIZE);
	uintptr_t start_addr = ROUNDDOWN((uintptr_t)va, PGSIZE);

	for (; start_addr < end; start_addr += PGSIZE) {
		pg = page_lookup(env->env_pgdir, (void *)start_addr, &pte);
		// Fail if the page is above ULIM, unmapped, or lacks any of
		// the requested permission bits (plus PTE_P, per the spec).
		if (start_addr >= ULIM || !pg ||
		    (*pte & (perm | PTE_P)) != (perm | PTE_P)) {
			// The first erroneous address within the first page
			// is 'va' itself, not the page's base.
			user_mem_check_addr = start_addr < (uintptr_t)va ?
			    (uintptr_t)va : start_addr;
			return -E_FAULT;
		}
	}
	return 0;
}
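
For completeness, the user_mem_assert wrapper that kernel callers (e.g., sys_cputs in Example #19) build on top of user_mem_check, as it appears in the JOS skeleton:

// Like user_mem_check, but destroys the environment on failure
// instead of returning an error code.
void
user_mem_assert(struct Env *env, const void *va, size_t len, int perm)
{
	if (user_mem_check(env, va, len, perm | PTE_U) < 0) {
		cprintf("[%08x] user_mem_check assertion failure for "
			"va %08x\n", env->env_id, user_mem_check_addr);
		env_destroy(env);	// may not return
	}
}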
Example #12
File: env.c Project: ichaos/jos
//
// Allocate len bytes of physical memory for environment env,
// and map it at virtual address va in the environment's address space.
// Does not zero or otherwise initialize the mapped pages in any way.
// Pages should be writable by user and kernel.
// Panic if any allocation attempt fails.
//
static void
segment_alloc(struct Env *e, void *va, size_t len)
{
    // LAB 3: Your code here.
    // (But only if you need it for load_icode.)
    //
    // Hint: It is easier to use segment_alloc if the caller can pass
    //   'va' and 'len' values that are not page-aligned.
    //   You should round va down, and round len up.
    len = ROUNDUP(len, PGSIZE);     // round len up
    va = ROUNDDOWN(va, PGSIZE);     // round va down
    int i, r;
    struct Page *p = NULL;
    for (i = 0; i < len; i += PGSIZE) {
        if ((r = page_alloc(&p)) < 0)
            panic("segment_alloc : No more free memory.\n");
        if ((r = page_insert(e->env_pgdir, p, (void *)(va + i), PTE_USER)) < 0)
            panic("segment_alloc : page_insert failed.\n");
    }
}
Example #13
//
// Custom page fault handler - if faulting page is copy-on-write,
// map in our own private writable copy.
//
static void
pgfault(struct UTrapframe *utf)
{
	void *addr = (void *) utf->utf_fault_va;
	uint32_t err = utf->utf_err;

	// Round the address down to the nearest page
	addr = ROUNDDOWN(addr, PGSIZE);

	// Check that the faulting access was (1) a write, and (2) to a
	// copy-on-write page.  If not, panic.
	// Hint:
	//   Use the read-only page table mappings at uvpt
	//   (see <inc/memlayout.h>).
	//
	// If the write bit (FEC_WR) of err is not set, the fault was a read
	if ((err & FEC_WR) == 0)
		panic("fault was not caused by a write\n");
	if((uvpt[PGNUM(addr)]&PTE_COW) == 0)
		panic("faulting page was not copy-on-write");

	// Allocate a new page, map it at a temporary location (PFTEMP),
	// copy the data from the old page to the new page, then move the new
	// page to the old page's address.
	// Hint:
	//   You should make three system calls.
	//   No need to explicitly delete the old page's mapping.
	//
	// First attempt to allocate a new page at the temporary address
	// (PTE_P must be included or sys_page_alloc rejects the perms)
	if(sys_page_alloc(0, PFTEMP, PTE_P|PTE_U|PTE_W) != 0)
		panic("couldn't allocate a new page for copy-on-write");

	// Next, copy the memory from the old page to the new
	memcpy(PFTEMP, addr, PGSIZE);

	// Finally, remap the new page to the old address.  Don't bother
	//  cleaning up PFTEMP.
	if(sys_page_map(0, PFTEMP, 0, addr, PTE_P|PTE_U|PTE_W) != 0)
		panic("couldn't remap the temporary page for copy-on-write");
}
Example #14
void core_mmu_populate_user_map(struct core_mmu_table_info *dir_info,
			struct tee_mmu_info *mmu)
{
	struct core_mmu_table_info pg_info;
	struct pgt_cache *pgt_cache = &thread_get_tsd()->pgt_cache;
	struct pgt *pgt;
	size_t n;
	vaddr_t base;
	vaddr_t end;

	if (!mmu->size)
		return;	/* Nothing to map */

	/* Find the last valid entry */
	n = mmu->size;
	while (true) {
		n--;
		if (mmu->table[n].size)
			break;
		if (!n)
			return;	/* Nothing to map */
	}

	/*
	 * Allocate all page tables in advance.
	 */
	base = ROUNDDOWN(mmu->table[0].va, CORE_MMU_PGDIR_SIZE);
	end = ROUNDUP(mmu->table[n].va + mmu->table[n].size,
		      CORE_MMU_PGDIR_SIZE);
	pgt_alloc(pgt_cache, (end - base) >> CORE_MMU_PGDIR_SHIFT);
	pgt = SLIST_FIRST(pgt_cache);

	core_mmu_set_info_table(&pg_info, dir_info->level + 1, 0, NULL);

	for (n = 0; n < mmu->size; n++) {
		if (!mmu->table[n].size)
			continue;
		set_pg_region(dir_info, mmu->table + n, &pgt, &pg_info);
	}
}
Example #15
// Allocate a page of memory and map it at 'va' with permission
// 'perm' in the address space of 'envid'.
// The page's contents are set to 0.
// If a page is already mapped at 'va', that page is unmapped as a
// side effect.
//
// perm -- PTE_U | PTE_P must be set, PTE_AVAIL | PTE_W may or may not be set,
//         but no other bits may be set.
//
// Return 0 on success, < 0 on error.  Errors are:
//	-E_BAD_ENV if environment envid doesn't currently exist,
//		or the caller doesn't have permission to change envid.
//	-E_INVAL if va >= UTOP, or va is not page-aligned.
//	-E_INVAL if perm is inappropriate (see above).
//	-E_NO_MEM if there's no memory to allocate the new page,
//		or to allocate any necessary page tables.
static int
sys_page_alloc(envid_t envid, void *va, int perm)
{
	// Hint: This function is a wrapper around page_alloc() and
	//   page_insert() from kern/pmap.c.
	//   Most of the new code you write should be to check the
	//   parameters for correctness.
	//   If page_insert() fails, remember to free the page you
	//   allocated!
	// LAB 4: Your code here.
	struct Env *task;
	struct Page *page;

	//cprintf("sys_page_alloc: [%08x] .\n", envid);
	if (envid2env(envid, &task, 1) < 0)
		return -E_BAD_ENV;

	if (page_alloc(&page) < 0)
		return -E_NO_MEM;

	if ((unsigned int)va >= UTOP || va != ROUNDDOWN(va, PGSIZE))
		return -E_INVAL;

	// PTE_U and PTE_P must be set
	if (!(perm & PTE_U) || !(perm & PTE_P))
		return -E_INVAL;
	// other bits than PTE_{U,P,W,AVAIL} are set
	if (perm & ((~(PTE_U | PTE_P | PTE_W | PTE_AVAIL)) & 0xfff))
		return -E_INVAL;

	memset(page2kva(page), 0, PGSIZE);
	if (page_insert(task->env_pgdir, page, va, perm) < 0) {
		page_free(page);
		return -E_NO_MEM;
	}

	//cprintf("allocated page: [%08x].\n", page2pa(page));
	return 0;
}
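
On the user side this handler is reached through a thin stub in JOS's lib/syscall.c; a sketch assuming the standard variadic syscall() dispatcher (number, check flag, five argument slots):

// User-space stub: marshal the arguments into the generic trap.
int
sys_page_alloc(envid_t envid, void *va, int perm)
{
	return syscall(SYS_page_alloc, 1, envid, (uint32_t) va, perm, 0, 0);
}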
Example #16
// Fault in any disk block that is read or written, by loading it
// from disk into the block cache.
// Hint: Use ide_read and BLKSECTS.
static void
bc_pgfault(struct UTrapframe *utf)
{
	void *addr = (void *) utf->utf_fault_va;
	uint32_t blockno = ((uint32_t)addr - DISKMAP) / BLKSIZE;
	int r;
	uint32_t sectno;

	// Check that the fault was within the block cache region
	if (addr < (void*)DISKMAP || addr >= (void*)(DISKMAP + DISKSIZE))
		panic("page fault in FS: eip %08x, va %08x, err %04x",
		      utf->utf_eip, addr, utf->utf_err);

	// Allocate a page in the disk map region and read the
	// contents of the block from the disk into that page.
	//
	// LAB 5: Your code here
	addr = ROUNDDOWN(addr, PGSIZE);
	sectno = blockno * BLKSECTS;

	r = sys_page_alloc(0, addr, PTE_P|PTE_U|PTE_W);
	if (r)
		panic("Page Allocation Failed: %e", r);

	r = ide_read(sectno, addr, BLKSECTS);
	if (r)
		panic("IDE read failed: %e", r);

	//bitmap[blockno / 32] = 0; //clear bitmap to mark_in_use

	// Sanity check the block number. (exercise for the reader:
	// why do we do this *after* reading the block in?)
	if (super && blockno >= super->s_nblocks)
		panic("reading non-existent block %08x\n", blockno);

	// Check that the block we read was allocated.
	if (bitmap && block_is_free(blockno))
		panic("reading free block %08x\n", blockno);
}
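
The disk helpers used by the block-cache examples operate on 512-byte sectors; their signatures and the BLKSECTS conversion, per the JOS fs layer (BLKSIZE equals PGSIZE there, so one block is eight sectors):

// Sector-level I/O used by the block cache (JOS fs/ide.c).
int ide_read(uint32_t secno, void *dst, size_t nsecs);
int ide_write(uint32_t secno, const void *src, size_t nsecs);

// One block = BLKSIZE bytes = BLKSECTS disk sectors.
#define SECTSIZE	512
#define BLKSECTS	(BLKSIZE / SECTSIZE)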
Example #17
int
dumbfork(void)
{
	u_int addr;
	int envid, r;
	extern u_char end[];

	// Allocate a new child environment.
	// The kernel will initialize it with a copy of our register state,
	// so that the child will appear to have called sys_env_alloc() too -
	// except that in the child, this "fake" call to sys_env_alloc()
	// will return 0 instead of the envid of the child.
	envid = sys_env_alloc();
	if (envid < 0)
		panic("sys_env_fork: %e", envid);
	if (envid == 0) {
		// We're the child.
		// The copied value of the global variable 'env'
		// is no longer valid (it refers to the parent!).
		// Fix it and return 0.
		env = &envs[ENVX(sys_getenvid())];
		return 0;
	}

	// We're the parent.
	// Eagerly copy our entire address space into the child.
	// This is NOT what you should do in your fork implementation.
	for (addr=UTEXT; addr<(u_int)end; addr+=BY2PG)
		duppage(envid, addr);

	// Also copy the stack we are currently running on.
	duppage(envid, ROUNDDOWN((u_int)&addr, BY2PG));

	// Start the child environment running
	// (at the point above where the register state was copied).
	if ((r=sys_set_status(envid, ENV_RUNNABLE)) < 0)
		panic("sys_set_status: %e", r);

	return envid;
}
Example #18
// Flush the contents of the block containing VA out to disk if
// necessary, then clear the PTE_D bit using sys_page_map.
// If the block is not in the block cache or is not dirty, does
// nothing.
// Hint: Use va_is_mapped, va_is_dirty, and ide_write.
// Hint: Use the PTE_USER constant when calling sys_page_map.
// Hint: Don't forget to round addr down.
void
flush_block(void *addr)
{
    uint32_t blockno = ((uint32_t)addr - DISKMAP) / BLKSIZE;

    if (addr < (void*)DISKMAP || addr >= (void*)(DISKMAP + DISKSIZE))
        panic("flush_block of bad va %08x", addr);

    // LAB 5: Your code here.
    //panic("flush_block not implemented");
    addr=ROUNDDOWN(addr, PGSIZE);

    if(va_is_mapped(addr) && va_is_dirty(addr))
    {
        ide_write(BLKSECTS*blockno, addr, BLKSECTS);

        if(sys_page_map(0, addr, 0, addr, PTE_USER) < 0)
        {
            panic ("flush_block(), sys_page_map error");
        }
    }
}
Example #19
File: syscall.c Project: yahu/JOS
// Print a string to the system console.
// The string is exactly 'len' characters long.
// Destroys the environment on memory errors.
static void
sys_cputs(const char *s, size_t len)
{
	// Check that the user has permission to read memory [s, s+len).
	// Destroy the environment if not.

	// LAB 3: Your code here.
	// Reading [s, s+len) only requires PTE_U; demanding PTE_W here
	// would wrongly reject read-only user strings.
	user_mem_assert(curenv, (void *)s, len, PTE_U);

	// Manual re-check of the same range; redundant with the
	// user_mem_assert above, but retained from the original source.
	pte_t *ptx;
	uint32_t st = ROUNDDOWN((uint32_t)s, PGSIZE);

	while (st < (uint32_t)s + len) {
		ptx = pgdir_walk(curenv->env_pgdir, (void *)st, 0);
		if (!ptx || !(*ptx & PTE_P) || !(*ptx & PTE_U))
			env_destroy(curenv);
		st += PGSIZE;
	}

	// Print the string supplied by the user.
	cprintf("%.*s", len, s);
}
Example #20
File: thread.c Project: M1cha/lk
void arch_thread_initialize(thread_t *t)
{
	// create a default stack frame on the stack
	vaddr_t stack_top = (vaddr_t)t->stack + t->stack_size;

	// make sure the top of the stack is 8 byte aligned for EABI compliance
	stack_top = ROUNDDOWN(stack_top, 8);

	struct context_switch_frame *frame = (struct context_switch_frame *)(stack_top);
	frame--;

	// fill it in
	memset(frame, 0, sizeof(*frame));
	frame->lr = (vaddr_t)&initial_thread_func;

	// set the stack pointer
	t->arch.sp = (vaddr_t)frame;

#if ARM_WITH_VFP
    arm_fpu_thread_initialize(t);
#endif
}
Example #21
// do_mmap - add a vma with addr, len and flags (VM_READ/VM_WRITE/VM_STACK)
int
do_mmap(uintptr_t __user *addr_store, size_t len, uint32_t mmap_flags) {
    struct mm_struct *mm = current->mm;
    if (mm == NULL) {
        panic("kernel thread call mmap!!.\n");
    }
    if (addr_store == NULL || len == 0) {
        return -E_INVAL;
    }

    int ret = -E_INVAL;

    uintptr_t addr;

    lock_mm(mm);
    if (!copy_from_user(mm, &addr, addr_store, sizeof(uintptr_t), 1)) {
      goto out_unlock;
    }

    uintptr_t start = ROUNDDOWN(addr, PGSIZE), end = ROUNDUP(addr + len, PGSIZE);
    addr = start, len = end - start;

    uint32_t vm_flags = VM_READ;
    if (mmap_flags & MMAP_WRITE) vm_flags |= VM_WRITE;
    if (mmap_flags & MMAP_STACK) vm_flags |= VM_STACK;

    ret = -E_NO_MEM;
    if (addr == 0) {
      if ((addr = get_unmapped_area(mm, len)) == 0) {
        goto out_unlock;
      }
    }
    if ((ret = mm_map(mm, addr, len, vm_flags, NULL)) == 0) {
      copy_to_user (mm, addr_store, &addr, sizeof (uintptr_t));
    }
out_unlock:
    unlock_mm(mm);
    return ret;
}
Example #22
bool
copy_string(struct mm_struct *mm, char *dst, const char *src, size_t maxn) {
    size_t alen, part = ROUNDDOWN((uintptr_t)src + PGSIZE, PGSIZE) - (uintptr_t)src;
    while (1) {
        if (part > maxn) {
            part = maxn;
        }
        if (!user_mem_check(mm, (uintptr_t)src, part, 0)) {
            return 0;
        }
        if ((alen = strnlen(src, part)) < part) {
            memcpy(dst, src, alen + 1);
            return 1;
        }
        if (part == maxn) {
            return 0;
        }
        memcpy(dst, src, part);
        dst += part, src += part, maxn -= part;
        part = PGSIZE;
    }
}
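
A hypothetical caller, pulling one NUL-terminated argument string out of user memory during exec (the function name, buffer size, and error handling are illustrative, not from the original source):

// Hypothetical usage sketch for copy_string.
static int
fetch_arg(struct mm_struct *mm, const char *ustr) {
    char buf[4096];
    if (!copy_string(mm, buf, ustr, sizeof(buf))) {
        return -E_INVAL;   // bad pointer or unterminated string
    }
    cprintf("arg: %s\n", buf);
    return 0;
}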
Example #23
// Fault any disk block that is read in to memory by
// loading it from disk.
static void
bc_pgfault(struct UTrapframe *utf)
{
	void *addr = (void *) utf->utf_fault_va;
	uint32_t blockno = ((uint32_t)addr - DISKMAP) / BLKSIZE;
	int r;

	// Check that the fault was within the block cache region
	if (addr < (void*)DISKMAP || addr >= (void*)(DISKMAP + DISKSIZE))
		panic("page fault in FS: eip %08x, va %08x, err %04x",
		      utf->utf_eip, addr, utf->utf_err);

	// Sanity check the block number.
	if (super && blockno >= super->s_nblocks)
		panic("reading non-existent block %08x\n", blockno);

	// Allocate a page in the disk map region, read the contents
	// of the block from the disk into that page.
	// Hint: first round addr to page boundary. fs/ide.c has code to read
	// the disk.
	//
	// LAB 5: Your code here.
	addr = ROUNDDOWN(addr, PGSIZE);
	if (sys_page_alloc(0, addr, PTE_P|PTE_U|PTE_W) < 0)
		panic("bc.c/bc_pgfault: page allocation failed.\n");
	if ((r = ide_read(blockno * BLKSECTS, addr, BLKSECTS)) < 0)
		panic("bc.c/bc_pgfault: ide_read failed: %e", r);

	// Clear the dirty bit for the disk block page since we just read the
	// block from disk
	if ((r = sys_page_map(0, addr, 0, addr, uvpt[PGNUM(addr)] & PTE_SYSCALL)) < 0)
		panic("in bc_pgfault, sys_page_map: %e", r);

	// Check that the block we read was allocated. (exercise for
	// the reader: why do we do this *after* reading the block
	// in?)
	if (bitmap && block_is_free(blockno))
		panic("reading free block %08x\n", blockno);
}
Example #24
// Flush the contents of the block containing VA out to disk if
// necessary, then clear the PTE_D bit using sys_page_map.
// If the block is not in the block cache or is not dirty, does
// nothing.
// Hint: Use va_is_mapped, va_is_dirty, and ide_write.
// Hint: Use the PTE_SYSCALL constant when calling sys_page_map.
// Hint: Don't forget to round addr down.
void
flush_block(void *addr)
{
	uint64_t blockno = ((uint64_t)addr - DISKMAP) / BLKSIZE;
	int r;	

	if (addr < (void*)DISKMAP || addr >= (void*)(DISKMAP + DISKSIZE))
		panic("flush_block of bad va %08x", addr);

	// LAB 5: Your code here.
	addr = ROUNDDOWN(addr, BLKSIZE);
	if (va_is_mapped(addr) && va_is_dirty(addr)) {
#ifdef VMM_GUEST
		host_write(blockno * BLKSECTS, addr, BLKSECTS);
#else
		ide_write(blockno * BLKSECTS, addr, BLKSECTS);
#endif
	}
	if (va_is_mapped(addr)) {
		// Remap with the page's own PTE_SYSCALL bits to clear PTE_D.
		// (PTE_D is not part of PTE_SYSCALL, so the original
		// 'PTE_SYSCALL & ~PTE_D' was a no-op.)
		if ((r = sys_page_map(0, addr, 0, addr,
				      uvpt[PGNUM(addr)] & PTE_SYSCALL)) < 0)
			cprintf("error in flushing the block : %e\n", r);
	}
	//panic("flush_block not implemented");
}
Example #25
//
// Check that an environment is allowed to access the range of memory
// [va, va+len) with permissions 'perm | PTE_P'.
// Normally 'perm' will contain PTE_U at least, but this is not required.
// 'va' and 'len' need not be page-aligned; you must test every page that
// contains any of that range.  You will test either 'len/PGSIZE',
// 'len/PGSIZE + 1', or 'len/PGSIZE + 2' pages.
//
// A user program can access a virtual address if (1) the address is below
// ULIM, and (2) the page table gives it permission.  These are exactly
// the tests you should implement here.
//
// If there is an error, set the 'user_mem_check_addr' variable to the first
// erroneous virtual address.
//
// Returns 0 if the user program can access this range of addresses,
// and -E_FAULT otherwise.
//
int
user_mem_check(struct Env *env, const void *va, size_t len, int perm)
{
	// LAB 3: Your code here.
	uintptr_t lva = (uintptr_t) va;
	uintptr_t rva = (uintptr_t) va + len - 1;
	uintptr_t idx;
	pte_t *pte;
	perm |= PTE_P;	// PTE_U is not implied here; callers request it via 'perm'

	for (idx = lva; idx <= rva; idx = ROUNDDOWN(idx+PGSIZE, PGSIZE)) {
		if (idx >= ULIM) {
			user_mem_check_addr = idx;
			return -E_FAULT;
		}
		pte = pgdir_walk (env->env_pgdir, (void*)idx, 0);
		if (pte == NULL || (*pte & perm) != perm) {
			user_mem_check_addr = idx;		
			return -E_FAULT;
		}
	}
	return 0;
}
Example #26
File: vmm.c Project: jefjin/ucore
int
mm_brk(struct mm_struct *mm, uintptr_t addr, size_t len) {
    uintptr_t start = ROUNDDOWN(addr, PGSIZE), end = ROUNDUP(addr + len, PGSIZE);
    if (!USER_ACCESS(start, end)) {
        return -E_INVAL;
    }

    int ret;
    if ((ret = mm_unmap(mm, start, end - start)) != 0) {
        return ret;
    }
    uint32_t vm_flags = VM_READ | VM_WRITE;
    struct vma_struct *vma = find_vma(mm, start - 1);
    if (vma != NULL && vma->vm_end == start && vma->vm_flags == vm_flags) {
        vma->vm_end = end;
        return 0;
    }
    if ((vma = vma_create(start, end, vm_flags)) == NULL) {
        return -E_NO_MEM;
    }
    insert_vma_struct(mm, vma);
    return 0;
}
Example #27
//
// Custom page fault handler - if faulting page is copy-on-write,
// map in our own private writable copy.
//
static void
pgfault(struct UTrapframe *utf)
{
    void *addr = (void *) utf->utf_fault_va;
    uint32_t err = utf->utf_err;
    int r;

    // Check that the faulting access was (1) a write, and (2) to a
    // copy-on-write page.  If not, panic.
    // Hint:
    //   Use the read-only page table mappings at uvpt
    //   (see <inc/memlayout.h>).

    // LAB 4: Your code here.
    if (!((err & FEC_WR) &&
          (uvpd[PDX(addr)] & PTE_P) &&
          (uvpt[PGNUM(addr)] & PTE_P) &&
          (uvpt[PGNUM(addr)] & PTE_COW)))
        panic("pgfault: fault is not a write to a present COW page\n");
    // Allocate a new page, map it at a temporary location (PFTEMP),
    // copy the data from the old page to the new page, then move the new
    // page to the old page's address.
    // Hint:
    //   You should make three system calls.

    // LAB 4: Your code here.
    void *radd = ROUNDDOWN(addr, PGSIZE);
    if(sys_page_alloc(0, PFTEMP, PTE_U | PTE_W | PTE_P) < 0)
        panic("sys_page_alloc fails\n");
    memmove(PFTEMP, radd, PGSIZE);
    if(sys_page_map(0, PFTEMP, 0, radd, PTE_U | PTE_W | PTE_P) < 0)
        panic("sys_page_map fails\n");
    if (sys_page_unmap(0, PFTEMP) < 0)
        panic("sys_page_unmap fails\n");

    //panic("pgfault not implemented");
}
Example #28
// Look up the struct BlockInfo for block 'blocknum'.
// Automatically allocates a page for BlockInfo if required & 'create != 0'.
// The resulting BlockInfo is stored in '*result'.
// Returns 0 on success, < 0 on failure.  Error codes include -E_INVAL
//   (blocknum out of range), -E_IO (I/O error), -E_NOT_FOUND (blocknum has
//   no BlockInfo yet and 'create' is 0).
//
static int
get_block_info(blocknum_t blocknum, struct BlockInfo **result, int create)
{
	struct BlockInfo *bip;
	int r;

	*result = 0;
	if (blocknum >= (blocknum_t) (DISKSIZE / BLKSIZE))
		return -E_INVAL;

	bip = &((struct BlockInfo *) BLOCKINFOMAP)[blocknum];

	// ensure that page exists
	if (!(vpd[PDX(bip)] & PTE_P) || !(vpt[PGNUM(bip)] & PTE_P)) {
		struct BlockInfo *page_bip, *end_page_bip;

		if (!create)
			return -E_NOT_FOUND;

		page_bip = (struct BlockInfo *) ROUNDDOWN(bip, PGSIZE);
		if ((r = sys_page_alloc(0, page_bip, PTE_P | PTE_U | PTE_W)) < 0)
			return r;

		// initialize contents
		end_page_bip = (struct BlockInfo *) ROUNDUP(page_bip + 1, PGSIZE);
		while (page_bip < end_page_bip) {
			page_bip->bi_head = 0;
			page_bip->bi_nlocked = 0;
			page_bip->bi_count = 0;
			page_bip->bi_initialized = 0;
			++page_bip;
		}
	}

	*result = bip;
	return 0;
}
Example #29
void
kmstartup(void)
{
	char *cp;
	struct gmonparam *p = &_gmonparam;
	int size;

	/*
	 * Round lowpc and highpc to multiples of the density we're using
	 * so the rest of the scaling (here and in gprof) stays in ints.
	 */
	p->lowpc = ROUNDDOWN(KERNBASE, HISTFRACTION * sizeof(HISTCOUNTER));
	p->highpc = ROUNDUP((u_long)etext, HISTFRACTION * sizeof(HISTCOUNTER));
	p->textsize = p->highpc - p->lowpc;
	printf("Profiling kernel, textsize=%ld [%lx..%lx]\n",
	       p->textsize, p->lowpc, p->highpc);
	p->kcountsize = p->textsize / HISTFRACTION;
	p->hashfraction = HASHFRACTION;
	p->fromssize = p->textsize / HASHFRACTION;
	p->tolimit = p->textsize * ARCDENSITY / 100;
	if (p->tolimit < MINARCS)
		p->tolimit = MINARCS;
	else if (p->tolimit > MAXARCS)
		p->tolimit = MAXARCS;
	p->tossize = p->tolimit * sizeof(struct tostruct);
	size = p->kcountsize + p->fromssize + p->tossize;
	cp = km_alloc(round_page(size), &kv_any, &kp_zero, &kd_nowait);
	if (cp == NULL) {
		printf("No memory for profiling.\n");
		return;
	}
	p->tos = (struct tostruct *)cp;
	cp += p->tossize;
	p->kcount = (u_short *)cp;
	cp += p->kcountsize;
	p->froms = (u_short *)cp;
}
Example #30
//
// Allocate len bytes of physical memory for environment env,
// and map it at virtual address va in the environment's address space.
// Does not zero or otherwise initialize the mapped pages in any way.
// Pages should be writable by user and kernel.
// Panic if any allocation attempt fails.
//
static void
segment_alloc(struct Env *e, void *va, size_t len)
{
	// LAB 3: Your code here.
	// (But only if you need it for load_icode.)
	//
	// Hint: It is easier to use segment_alloc if the caller can pass
	//   'va' and 'len' values that are not page-aligned.
	//   You should round va down, and round len up.
    va = ROUNDDOWN(va, PGSIZE);
    len = ROUNDUP(len, PGSIZE);
    struct Page *pg;
    int r;
    for (; len > 0; len -= PGSIZE, va += PGSIZE) {
        // allocate a physical page
        r = page_alloc(&pg);
        if (r != 0)
            panic("segment_alloc: out of free physical pages\n");
        // map it into the environment's address space
        r = page_insert(e->env_pgdir, pg, va, PTE_U|PTE_W);
        if (r != 0)
            panic("segment_alloc: page_insert failed\n");
    }
}
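
For context, segment_alloc is meant to be called from load_icode once per loadable ELF segment; a sketch of that loop under the usual JOS lab-3 assumptions (e's page directory must already be loaded with lcr3 so the memmove lands in the target address space; 'binary' points at the ELF image):

// Sketch: the load_icode loop that segment_alloc serves (illustrative).
struct Elf *elf = (struct Elf *) binary;
struct Proghdr *ph = (struct Proghdr *) (binary + elf->e_phoff);
struct Proghdr *eph = ph + elf->e_phnum;
for (; ph < eph; ph++) {
	if (ph->p_type != ELF_PROG_LOAD)
		continue;
	segment_alloc(e, (void *) ph->p_va, ph->p_memsz);
	memmove((void *) ph->p_va, binary + ph->p_offset, ph->p_filesz);
	memset((void *) ph->p_va + ph->p_filesz, 0,
	       ph->p_memsz - ph->p_filesz);
}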