Code Example #1
File: spawn.c  Project: bosswissam/djos
// Copy the mappings for shared pages into the child address space.
static int
copy_shared_pages(envid_t child)
{
	// LAB 7: Your code here.
	int r;
	uint32_t i, j, pn;

	// Copy shared address space to child
	for (i = PDX(UTEXT); i < PDX(UXSTACKTOP); i++) {
		if (vpd[i] & PTE_P) { // If page table present
			for (j = 0; j < NPTENTRIES; j++) {	
				pn = PGNUM(PGADDR(i, j, 0));
				if (pn == PGNUM(UXSTACKTOP - PGSIZE)) {
					break; // Don't map when reach uxstack
				}

				if ((vpt[pn] & PTE_P) &&
				    (vpt[pn] & PTE_SHARE)) {
					if ((r = sys_page_map(0,
							      (void *) (pn * PGSIZE),
							      child,
							      (void *) (pn * PGSIZE),
							      vpt[pn] & PTE_SYSCALL)) < 0) {
						return r;
					}
				}
			}
		}
	}

	return 0;
}
Code Example #2
File: umm.c  Project: Nukem9/Dune
void *umm_shmat(int shmid, void *addr, int shmflg)
{
	struct shmid_ds shm;
	unsigned long len;
	void *mem;
	int ret;
	int perm;
	int prot = PROT_READ;
	int adjust_mmap_len = 0;

	if (!(shmflg & SHM_RDONLY))
		prot |= PROT_WRITE;

	perm = prot_to_perm(prot);

	if (shmctl(shmid, IPC_STAT, &shm) == -1)
		return (void*) -1;

	len = shm.shm_segsz;

	if (!addr) {
		if (!umm_space_left(len))
			return (void*) -ENOMEM;
		adjust_mmap_len = 1;
		addr = (void *) umm_get_map_pos() - PGADDR(len + PGSIZE - 1);
	} else if (!mem_ref_is_safe(addr, len))
		return (void*) -EINVAL;

	mem = shmat(shmid, addr, shmflg);
	if (mem != addr)
		return (void*) (long) -errno;

	ret = dune_vm_map_phys(pgroot, addr, len,
			      (void *) dune_va_to_pa(addr),
			      perm);
	if (ret) {
		shmdt(addr);
		return (void*) (long) ret;
	}

	if (adjust_mmap_len)
		mmap_len +=  PGADDR(len + PGSIZE - 1);

	return addr;
}
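
The Dune umm.c examples on this page (#2, #3, #7, and #8) all round a length up to a whole number of pages with the expression PGADDR(len + PGSIZE - 1), which only works if PGADDR(x) is a single-argument macro that masks off the page-offset bits. The sketch below illustrates that idiom with hypothetical stand-in macros (they are assumptions for illustration, not copied from Dune's headers). Note that the JOS examples on this page instead use a three-argument PGADDR(d, t, o) that assembles a linear address from a directory index, a table index, and an offset.

#include <assert.h>
#include <stdint.h>

/* Hypothetical stand-ins for the macros used by the Dune examples. */
#define PGSIZE    4096UL
#define PGOFF(x)  ((uintptr_t)(x) & (PGSIZE - 1))   /* offset within the page */
#define PGADDR(x) ((uintptr_t)(x) & ~(PGSIZE - 1))  /* page-aligned base      */

/* Round a byte length up to a whole number of pages. */
static uintptr_t page_round_up(uintptr_t len)
{
	return PGADDR(len + PGSIZE - 1);
}

int main(void)
{
	assert(page_round_up(1) == PGSIZE);              /* partial page -> one page */
	assert(page_round_up(PGSIZE) == PGSIZE);         /* exact multiple unchanged */
	assert(page_round_up(PGSIZE + 1) == 2 * PGSIZE);
	assert(PGOFF(0x1234) == 0x234);
	return 0;
}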
Code Example #3
File: umm.c  Project: Nukem9/Dune
void *umm_mremap(void *old_address, size_t old_size, size_t new_size, int flags,
		 void *new_address)
{
	int adjust_mmap_len = 0;
	void *ret;

	if (!mem_ref_is_safe(old_address, old_size))
		return (void*) -EACCES;

	if (flags & MREMAP_FIXED) {
		if (!mem_ref_is_safe(new_address, new_size))
			return (void*) -EACCES;
	} else {
		if (!umm_space_left(new_size))
			return (void*) -ENOMEM;
		adjust_mmap_len = 1;
		new_address = (void *) umm_get_map_pos() - PGADDR(new_size + PGSIZE - 1);
	}

	/* XXX add support in future */
	if (!(flags & MREMAP_MAYMOVE))
		return (void*) -EINVAL;

	flags |= MREMAP_FIXED | MREMAP_MAYMOVE;

	ret = mremap(old_address, old_size, new_size, flags, new_address);
	if (ret != new_address)
		return (void*) (long) -errno;

	if (adjust_mmap_len)
		mmap_len += PGADDR(new_size + PGSIZE - 1);

	dune_vm_unmap(pgroot, old_address, old_size);

	if (dune_vm_map_phys(pgroot, new_address, new_size,
			     (void *) dune_va_to_pa(new_address),
			     prot_to_perm(PROT_READ | PROT_WRITE))) {
		printf("help me!\n");
		exit(1);
	}

	return ret;
}
Code Example #4
File: env.c  Project: gzs715/JOS
//
// Frees env e and all memory it uses.
// 
void
env_free(struct Env *e)
{
	pte_t *pt;
	uint32_t pdeno, pteno;
	physaddr_t pa;
	
	// If freeing the current environment, switch to boot_pgdir
	// before freeing the page directory, just in case the page
	// gets reused.
	if (e == curenv)
		lcr3(boot_cr3);

	// Note the environment's demise.
	// cprintf("[%08x] free env %08x\n", curenv ? curenv->env_id : 0, e->env_id);

	// Flush all mapped pages in the user portion of the address space
	static_assert(UTOP % PTSIZE == 0);
	for (pdeno = 0; pdeno < PDX(UTOP); pdeno++) {

		// only look at mapped page tables
		if (!(e->env_pgdir[pdeno] & PTE_P))
			continue;

		// find the pa and va of the page table
		pa = PTE_ADDR(e->env_pgdir[pdeno]);
		pt = (pte_t*) KADDR(pa);

		// unmap all PTEs in this page table
		for (pteno = 0; pteno <= PTX(~0); pteno++) {
			if (pt[pteno] & PTE_P)
				page_remove(e->env_pgdir, PGADDR(pdeno, pteno, 0));
		}

		// free the page table itself
		e->env_pgdir[pdeno] = 0;
		page_decref(pa2page(pa));

	}

	// free the page directory
	pa = e->env_cr3;
	e->env_pgdir = 0;
	e->env_cr3 = 0;
	page_decref(pa2page(pa));

	// return the environment to the free list
	e->env_status = ENV_FREE;
	LIST_INSERT_HEAD(&env_free_list, e, env_link);
}
Code Example #5
File: umm.c  Project: Nukem9/Dune
int umm_alloc_stack(uintptr_t *stack_top)
{
	int ret;
	uintptr_t base = umm_get_map_pos();

	if (!umm_space_left(APP_STACK_SIZE))
		return -ENOMEM;

	// Make sure the last page is left unmapped so hopefully
	// we can at least catch most common stack overruns.
	// If not, the untrusted code is only harming itself.
	ret = umm_mmap_anom((void *) (PGADDR(base) -
			    APP_STACK_SIZE + PGSIZE),
			    APP_STACK_SIZE - PGSIZE,
			    PROT_READ | PROT_WRITE, 0);
	if (ret)
		return ret;

	mmap_len += APP_STACK_SIZE + PGOFF(base);
	*stack_top = PGADDR(base);
	return 0;
}
Code Example #6
File: fork.c  Project: hvpeteet/BackupGatech
//
// Map our virtual page pn (address pn*PGSIZE) into the target envid
// at the same virtual address.  If the page is writable or copy-on-write,
// the new mapping must be created copy-on-write, and then our mapping must be
// marked copy-on-write as well.  (Exercise: Why do we need to mark ours
// copy-on-write again if it was already copy-on-write at the beginning of
// this function?)
//
// Returns: 0 on success, < 0 on error.
// It is also OK to panic on error.
//
static int
duppage(envid_t envid, unsigned pn)
{
  
  int r;

  // LAB 4: Your code here.
  envid_t e_id = sys_getenvid();
  //cprintf("in duppage(srcenv=%d, dstenv=%d, page=%x, addr=0x%x", e_id, envid, pn, pn*PGSIZE);

  void *addr = (void*) (pn * PGSIZE);
  if (((uvpt[pn] & PTE_W) || (uvpt[pn] & PTE_COW)) && !(uvpt[pn] & PTE_SHARE)) {
    // Make the mapping COW
    //cprintf("-------COW\n");
    int mapping_err;
    mapping_err = sys_page_map(e_id, addr, envid, addr, PTE_P | PTE_COW | PTE_U);
    if (mapping_err) {
      panic("mapping other environment's page to current env failed");
    }
    // mapping_err = sys_page_unmap(e_id, addr);
    // if (mapping_err) {
    //   panic("unmapping environment's page to self failed");
    // }
    mapping_err = sys_page_map(envid, addr, e_id, addr, PTE_P | PTE_COW | PTE_U);
    if (mapping_err) {
      panic("dst mapping back to src failed with error %e", mapping_err);
    }
  } else {
    // Normal (read-only or shared) mapping: copy it with its original permissions.
    //cprintf("-------NORMAL\n");
    int mapping_err = sys_page_map(0, PGADDR(0, pn, 0),
                                   envid, PGADDR(0, pn, 0),
                                   (uvpt[pn] & PTE_SYSCALL));
    if (mapping_err) {
      panic("mapping page into child failed with error %e", mapping_err);
    }
    // sys_page_map(e_id, addr, envid, addr, PTE_P | PTE_U);
  }
  return 0;
}
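
duppage() above is only half of the copy-on-write story: it is driven by fork(), which walks every present user page below the exception stack, while the actual copying happens later in a user-mode page-fault handler. The following is a minimal sketch of such a caller under the usual JOS Lab 4 conventions; the handler pgfault() and the assembly trampoline _pgfault_upcall are assumed to exist elsewhere in lib/fork.c and are not shown.

// Minimal fork() sketch that drives duppage(); assumes the JOS lab 4 user
// library (sys_exofork, uvpd/uvpt, set_pgfault_handler, thisenv, ...).
extern void _pgfault_upcall(void);   // assembly trampoline, defined elsewhere

envid_t
fork(void)
{
	envid_t child;
	uintptr_t va;
	int r;

	set_pgfault_handler(pgfault);    // install the COW fault handler first

	child = sys_exofork();
	if (child < 0)
		return child;
	if (child == 0) {
		// Child: repair thisenv, then return 0 as fork() is expected to.
		thisenv = &envs[ENVX(sys_getenvid())];
		return 0;
	}

	// Share or COW-map every page that is mapped below the exception stack.
	for (va = 0; va < UXSTACKTOP - PGSIZE; va += PGSIZE) {
		if ((uvpd[PDX(va)] & PTE_P) && (uvpt[PGNUM(va)] & PTE_P))
			duppage(child, PGNUM(va));
	}

	// The child gets a fresh exception stack and the page-fault upcall.
	if ((r = sys_page_alloc(child, (void *)(UXSTACKTOP - PGSIZE),
				PTE_P | PTE_U | PTE_W)) < 0)
		return r;
	if ((r = sys_env_set_pgfault_upcall(child, _pgfault_upcall)) < 0)
		return r;

	if ((r = sys_env_set_status(child, ENV_RUNNABLE)) < 0)
		return r;
	return child;
}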
Code Example #7
File: umm.c  Project: Nukem9/Dune
unsigned long umm_mmap(void *addr, size_t len, int prot,
	       int flags, int fd, off_t offset)
{
	int adjust_mmap_len = 0;
	int ret;

#if USE_BIG_MEM
	if (len >= BIG_PGSIZE && (flags & MAP_ANONYMOUS) && !addr)
		return umm_map_big(len, prot);
#endif

	if (!addr) {
		if (!umm_space_left(len))
			return -ENOMEM;
		adjust_mmap_len = 1;
		addr = (void *) umm_get_map_pos() - PGADDR(len + PGSIZE - 1);
	} else if (!mem_ref_is_safe(addr, len))
		return -EINVAL;

	if (flags & MAP_ANONYMOUS) {
		ret = umm_mmap_anom(addr, len, prot, 0);
		if (ret)
			return ret;
	} else if (fd > 0) {
		ret = umm_mmap_file(addr, len, prot, flags, fd, offset);
		if (ret)
			return ret;
	} else
		return -EINVAL;

	if (adjust_mmap_len)
		mmap_len +=  PGADDR(len + PGSIZE - 1);

	return (unsigned long) addr;

}
Code Example #8
File: umm.c  Project: Nukem9/Dune
unsigned long umm_brk(unsigned long brk)
{
	size_t len;
	int ret;

	if (!brk)
		return UMM_ADDR_START;

	if (brk < UMM_ADDR_START)
		return -EINVAL;

	len = brk - UMM_ADDR_START;

#if USE_BIG_MEM
	len = BIG_PGADDR(len + BIG_PGSIZE - 1);
#else
	len = PGADDR(len + PGSIZE - 1);
#endif

	if (!umm_space_left(len))
		return -ENOMEM;

	if (len == brk_len) {
		return brk;
	} else if (len < brk_len) {
		ret = munmap((void *) (UMM_ADDR_START + len), brk_len - len);
		if (ret)
			return -errno;
		dune_vm_unmap(pgroot, (void *) (UMM_ADDR_START + len),
			      brk_len - len);
	} else {
		ret = umm_mmap_anom((void *) (UMM_ADDR_START + brk_len),
				    len - brk_len,
				    PROT_READ | PROT_WRITE, USE_BIG_MEM);
		if (ret)
			return ret;
	}

	brk_len = len;
	return brk;
}
Code Example #9
File: vm.c  Project: peasentspring/dune
void dune_vm_default_pgflt_handler(uintptr_t addr, uint64_t fec)
{
	ptent_t *pte = NULL;
	int rc;

	/*
	 * Assert on present and reserved bits.
	 */
	assert(!(fec & (FEC_P | FEC_RSV)));

	rc = dune_vm_lookup(pgroot, (void *) addr, 0, &pte);
	assert(rc == 0);

	if ((fec & FEC_W) && (*pte & PTE_COW)) {
		void *newPage;
		struct page *pg = dune_pa2page(PTE_ADDR(*pte));
		ptent_t perm = PTE_FLAGS(*pte);

		// Compute new permissions
		perm &= ~PTE_COW;
		perm |= PTE_W;

		if (dune_page_isfrompool(PTE_ADDR(*pte)) && pg->ref == 1) {
			*pte = PTE_ADDR(*pte) | perm;
			return;
		}

		// Duplicate page
		newPage = alloc_page();
		memcpy(newPage, (void *)PGADDR(addr), PGSIZE);

		// Map page
		if (dune_page_isfrompool(PTE_ADDR(*pte))) {
			dune_page_put(pg);
		}
		*pte = PTE_ADDR(newPage) | perm;

		// Invalidate
		dune_flush_tlb_one(addr);
	}
}
Code Example #10
File: pmap.c  Project: yuki252111/os
//
// Initialize page structure and memory free list.
// After this is done, NEVER use boot_alloc again.  ONLY use the page
// allocator functions below to allocate and deallocate physical
// memory via the page_free_list.
//
void
page_init(void)
{
	// The example code here marks all physical pages as free.
	// However this is not truly the case.  What memory is free?
	//  1) Mark physical page 0 as in use.
	//     This way we preserve the real-mode IDT and BIOS structures
	//     in case we ever need them.  (Currently we don't, but...)
	//  2) The rest of base memory, [PGSIZE, npages_basemem * PGSIZE)
	//     is free.
	//  3) Then comes the IO hole [IOPHYSMEM, EXTPHYSMEM), which must
	//     never be allocated.
	//  4) Then extended memory [EXTPHYSMEM, ...).
	//     Some of it is in use, some is free. Where is the kernel
	//     in physical memory?  Which pages are already in use for
	//     page tables and other data structures?
	//
	// Change the code to reflect this.
	// NB: DO NOT actually touch the physical memory corresponding to
	// free pages!
	int i;
	physaddr_t firstFreePhysAddr = (physaddr_t) PADDR(boot_alloc(0));

	page_free_list = NULL;
	for (i = npages - 1; i >= 0; i--) {
		physaddr_t pagePhysAddr = (physaddr_t) PGADDR(0, i, 0);
		if (pagePhysAddr == 0 ||
		    (pagePhysAddr >= IOPHYSMEM && pagePhysAddr < firstFreePhysAddr)) {
			pages[i].pp_ref = 1;
			pages[i].pp_link = NULL;
		} else {
			pages[i].pp_ref = 0;
			pages[i].pp_link = page_free_list;
			page_free_list = &pages[i];
		}
	}
}
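
The comment at the top of page_init() insists that, once the free list is built, physical pages must only be handed out through the page allocator. For orientation, here is a minimal sketch of that allocator pair under the post-2011 JOS conventions (struct PageInfo, ALLOC_ZERO, page2kva); the exact struct and flag names in this particular project may differ, so treat the details as assumptions.

// Sketch of the allocator functions the page_init() comment refers to.
// Uses the same pages[] / page_free_list globals that page_init() set up.
struct PageInfo *
page_alloc(int alloc_flags)
{
	struct PageInfo *pp = page_free_list;

	if (!pp)
		return NULL;                        // physical memory exhausted

	page_free_list = pp->pp_link;
	pp->pp_link = NULL;

	if (alloc_flags & ALLOC_ZERO)
		memset(page2kva(pp), 0, PGSIZE);    // zero through the kernel mapping

	return pp;
}

void
page_free(struct PageInfo *pp)
{
	// Only pages with no outstanding references may return to the free list.
	assert(pp->pp_ref == 0);
	assert(pp->pp_link == NULL);

	pp->pp_link = page_free_list;
	page_free_list = pp;
}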
Code Example #11
File: pmm.c  Project: lhh520/os-4-risc-v
/* *
 * The page directory entry corresponding to the virtual address range
 * [VPT, VPT + PTSIZE) points to the page directory itself. Thus, the page
 * directory is treated as a page table as well as a page directory.
 *
 * One result of treating the page directory as a page table is that all PTEs
 * can be accessed through a "virtual page table" at virtual address VPT, and the
 * PTE for page number n is stored in vpt[n].
 *
 * A second consequence is that the contents of the current page directory will
 * always be available at virtual address PGADDR(PDX(VPT), PDX(VPT), 0), to which
 * vpd is set below.
 * */
pte_t * const vpt = (pte_t *)VPT;
pde_t * const vpd = (pde_t *)PGADDR(PDX(VPT), PDX(VPT), 0);

/* *
 * Global Descriptor Table:
 *
 * The kernel and user segments are identical (except for the DPL). To load
 * the %ss register, the CPL must equal the DPL. Thus, we must duplicate the
 * segments for the user and the kernel. Defined as follows:
 *   - 0x0 :  unused (always faults -- for trapping NULL far pointers)
 *   - 0x8 :  kernel code segment
 *   - 0x10:  kernel data segment
 *   - 0x18:  user code segment
 *   - 0x20:  user data segment
 *   - 0x28:  defined for tss, initialized in gdt_init
 * */
static struct segdesc gdt[] = {
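
As a concrete illustration of the self-mapping described in the comment at the top of this example, any code running with these tables loaded can read the PTE for a virtual address directly through vpd and vpt, with no physical-address arithmetic. The helper below is our own sketch (not part of this project) and assumes only the vpt/vpd declarations above plus the usual PDX, PGSIZE, and PTE_P macros.

// Return the PTE that maps virtual address va, or 0 if nothing is mapped.
// vpd indexes the page directory; vpt indexes every PTE by virtual page number.
static pte_t
lookup_pte(uintptr_t va)
{
	// The page table must be present before vpt[...] may be dereferenced,
	// otherwise the access would itself fault inside the virtual page table.
	if (!(vpd[PDX(va)] & PTE_P))
		return 0;
	return vpt[va / PGSIZE];    // va / PGSIZE is the virtual page number
}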
Code Example #12
File: spawn.c  Project: gzs715/JOS
// Spawn a child process from a program image loaded from the file system.
// prog: the pathname of the program to run.
// argv: pointer to null-terminated array of pointers to strings,
// 	 which will be passed to the child as its command-line arguments.
// Returns child envid on success, < 0 on failure.
int
spawn(const char *prog, const char **argv)
{
	
	unsigned char elf_buf[512];
	struct Trapframe child_tf;
	envid_t child;

	// Insert your code, following approximately this procedure:
	//
	//   - Open the program file.
	//
	//   - Read the ELF header, as you have before, and sanity check its
	//     magic number.  (Check out your load_icode!)
	//
	//   - Use sys_exofork() to create a new environment.
	//
	//   - Set child_tf to an initial struct Trapframe for the child.
	//     Hint: The sys_exofork() system call has already created
	//     a good basis, in envs[ENVX(child)].env_tf.
	//     Hint: You must do something with the program's entry point.
	//     What?  (See load_icode!)
	//
	//   - Call the init_stack() function above to set up
	//     the initial stack page for the child environment.
	//
	//   - Map all of the program's segments that are of p_type
	//     ELF_PROG_LOAD into the new environment's address space.
	//     Use the p_flags field in the Proghdr for each segment
	//     to determine how to map the segment:
	//
	//	* If the ELF flags do not include ELF_PROG_FLAG_WRITE,
	//	  then the segment contains text and read-only data.
	//	  Use read_map() to read the contents of this segment,
	//	  and map the pages it returns directly into the child
	//        so that multiple instances of the same program
	//	  will share the same copy of the program text.
	//        Be sure to map the program text read-only in the child.
	//        Read_map is like read but returns a pointer to the data in
	//        *blk rather than copying the data into another buffer.
	//
	//	* If the ELF segment flags DO include ELF_PROG_FLAG_WRITE,
	//	  then the segment contains read/write data and bss.
	//	  As with load_icode() in Lab 3, such an ELF segment
	//	  occupies p_memsz bytes in memory, but only the FIRST
	//	  p_filesz bytes of the segment are actually loaded
	//	  from the executable file - you must clear the rest to zero.
	//        For each page to be mapped for a read/write segment,
	//        allocate a page in the parent temporarily at UTEMP,
	//        read() the appropriate portion of the file into that page
	//	  and/or use memset() to zero non-loaded portions.
	//	  (You can avoid calling memset(), if you like, if
	//	  page_alloc() returns zeroed pages already.)
	//        Then insert the page mapping into the child.
	//        Look at init_stack() for inspiration.
	//        Be sure you understand why you can't use read_map() here.
	//
	//     Note: None of the segment addresses or lengths above
	//     are guaranteed to be page-aligned, so you must deal with
	//     these non-page-aligned values appropriately.
	//     The ELF linker does, however, guarantee that no two segments
	//     will overlap on the same page; and it guarantees that
	//     PGOFF(ph->p_offset) == PGOFF(ph->p_va).
	//
	//   - Call sys_env_set_trapframe(child, &child_tf) to set up the
	//     correct initial eip and esp values in the child.
	//
	//   - Start the child process running with sys_env_set_status().

	// LAB 5: Your code here.
	
	
	int fdnum;
	struct Elf *elf;
	char elfbuf[512];
	uintptr_t init_esp;
	int r;
	struct Proghdr *ph, *eph;
	uint32_t pageva,pageoff;
	void *blk;
	uint32_t i;
	uint32_t left;
	uint32_t pteno,pdeno;
	uint32_t pn = 0;
	uintptr_t addr;
	pte_t pte;
	//cprintf("spawn:%s\n",prog);
	
	if((fdnum = open(prog, O_RDWR)) < 0)
		return fdnum;

	read(fdnum, elfbuf, 512);
	elf = (struct Elf *) elfbuf;
	if(elf->e_magic != ELF_MAGIC)
		return -E_NOT_EXEC;
	if((child = sys_exofork()) < 0)
		return child;
	else if(child == 0)
	{
		//cprintf("child\n");
		env = &envs[ENVX(sys_getenvid())];
		return 0;
	}
	child_tf = envs[ENVX(child)].env_tf;
	child_tf.tf_eip = elf->e_entry;
	if((r = init_stack(child, argv, &init_esp)) < 0)
		return r;
	child_tf.tf_esp = init_esp;

	ph = (struct Proghdr *) ((uint8_t*)elf + elf->e_phoff);
	eph = ph + elf->e_phnum;
	for (; ph != eph; ph++)
	{
		if(ph->p_type == ELF_PROG_LOAD)
		{
			pageva = ROUNDDOWN(ph->p_va, PGSIZE);
			pageoff = ROUNDDOWN(ph->p_offset, PGSIZE);
			if(!(ph->p_flags & ELF_PROG_FLAG_WRITE))
			{
				for(i = 0; i< ph->p_memsz; i += PGSIZE)
				{
					if((r = read_map(fdnum, pageoff + i,&blk)) < 0)
						return r;
					if((r = sys_page_map(0, blk, child, (void *)(pageva + i),PTE_U|PTE_P)) < 0)
						return r;			
				}
			}
			else
			{
				for(i = 0; i < ph->p_memsz; i += PGSIZE)
				{
					sys_page_alloc(0, (void*)UTEMP, PTE_U|PTE_P|PTE_W);	
					if( i < ph->p_filesz)
					{
						
						seek(fdnum, pageoff + i);
						left = ph->p_filesz - i; 

						if(left > PGSIZE)
							read(fdnum, (void *)UTEMP, PGSIZE);
						else
						{
							read(fdnum,(void *)UTEMP, left);
							memset((void*)(UTEMP + left),0x0,PGSIZE - left);
						}
					}
					else
						memset((void*)UTEMP, 0x0,PGSIZE);
					sys_page_map(0, UTEMP, child,(void *)(pageva + i),PTE_U|PTE_W|PTE_P);
					sys_page_unmap(0,UTEMP);
				}
			}
		}

	}
	close(fdnum);
	if((r = sys_env_set_trapframe(child, &child_tf)) < 0)
		return r;

	for(pdeno = PDX(0);pdeno < PDX(UTOP);pdeno++)
	{
		if(!(vpd[pdeno] & (PTE_P)))
			continue;
		else
		{
			for(pteno = 0;pteno < NPTENTRIES;pteno++)
			{
				pn = (pdeno<<10) + pteno;
			
				if((vpt[pn] & PTE_P) && (vpt[pn] & PTE_SHARE))
				{
					//remember not to modify vpt[pn]
					pte = vpt[pn] & PTE_USER;
					addr = (uintptr_t)PGADDR(pdeno, pteno, 0);
					sys_page_map(0,(void*)addr,child,(void*)addr,pte);
				}

			}
		}
	}

	if((r = sys_env_set_status(child, ENV_RUNNABLE)) < 0)
		return r;
	return child;

	//panic("spawn unimplemented!");
}
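
For completeness, a typical caller of spawn() in a JOS user program passes a null-terminated argv and then waits for the child. The sketch below assumes the JOS user library (spawn(), wait(), cprintf()); the program name "hello" and its arguments are purely illustrative.

// Minimal usage sketch: spawn a child program and wait for it to exit.
void
umain(int argc, char **argv)
{
	const char *child_argv[] = { "hello", "world", 0 };
	envid_t child;

	child = spawn("hello", child_argv);
	if (child < 0) {
		cprintf("spawn failed: %e\n", child);
		return;
	}
	wait(child);
	cprintf("child %08x exited\n", child);
}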