Example #1
File: env.c  Project: anandab/akaros
// Initialize the kernel virtual memory layout for environment e.
// Allocate a page directory, set e->env_pgdir and e->env_cr3 accordingly,
// and initialize the kernel portion of the new environment's address space.
// Do NOT (yet) map anything into the user portion
// of the environment's virtual address space.
//
// Returns 0 on success, < 0 on error.  Errors include:
//	-ENOMEM if page directory or table could not be allocated.
//
int env_setup_vm(env_t *e)
{
	int ret;
	static page_t *shared_page = 0;

	if ((ret = arch_pgdir_setup(boot_pgdir, &e->env_pgdir)))
		return ret;
	/* TODO: verify there is nothing below ULIM */
	e->env_cr3 = arch_pgdir_get_cr3(e->env_pgdir);

	/* These need to be contiguous, so the kernel can alias them.  Note the
	 * pages return with a refcnt, but it's okay to insert them since we free
	 * them manually when the process is cleaned up. */
	if (!(e->procinfo = get_cont_pages(LOG2_UP(PROCINFO_NUM_PAGES), 0)))
		goto env_setup_vm_error_i;
	if (!(e->procdata = get_cont_pages(LOG2_UP(PROCDATA_NUM_PAGES), 0)))
		goto env_setup_vm_error_d;
	/* Normally we'd 0 the pages here.  We handle it in proc_init_proc*.  Don't
	 * start the process without calling those. */
	for (int i = 0; i < PROCINFO_NUM_PAGES; i++) {
		if (page_insert(e->env_pgdir, kva2page((void*)e->procinfo + i *
		                PGSIZE), (void*)(UINFO + i*PGSIZE), PTE_USER_RO) < 0)
			goto env_setup_vm_error;
	}
	for (int i = 0; i < PROCDATA_NUM_PAGES; i++) {
		if (page_insert(e->env_pgdir, kva2page((void*)e->procdata + i *
		                PGSIZE), (void*)(UDATA + i*PGSIZE), PTE_USER_RW) < 0)
			goto env_setup_vm_error;
	}
	for (int i = 0; i < PROCGINFO_NUM_PAGES; i++) {
		if (page_insert(e->env_pgdir,
		                kva2page((void*)&__proc_global_info + i * PGSIZE),
		                (void*)(UGINFO + i * PGSIZE), PTE_USER_RO) < 0)
			goto env_setup_vm_error;
	}
	/* Finally, set up the Global Shared Data page for all processes.  Can't be
	 * trusted, but still very useful at this stage for us.  Consider removing
	 * when we have real processes (TODO). 
	 *
	 * Note the page is alloced only the first time through, and its ref is
	 * stored in shared_page. */
	if (!shared_page) {
		if (upage_alloc(e, &shared_page, 1) < 0)
			goto env_setup_vm_error;
	}
	if (page_insert(e->env_pgdir, shared_page, (void*)UGDATA, PTE_USER_RW) < 0)
		goto env_setup_vm_error;

	return 0;

env_setup_vm_error:
	free_cont_pages(e->procdata, LOG2_UP(PROCDATA_NUM_PAGES));
env_setup_vm_error_d:
	free_cont_pages(e->procinfo, LOG2_UP(PROCINFO_NUM_PAGES));
env_setup_vm_error_i:
	page_decref(shared_page);
	env_user_mem_free(e, 0, UVPT);
	env_pagetable_free(e);
	return -ENOMEM;
}
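
A minimal caller sketch for the contract documented above; it is hypothetical and not taken from the Akaros tree, relying only on the documented behavior that the function returns 0 or -ENOMEM and tears down whatever it mapped before failing.

/* Hypothetical caller (illustrative only): env_setup_vm() cleans up after
 * itself on failure, so the error can simply be propagated. */
static int env_bootstrap(env_t *e)
{
	int ret = env_setup_vm(e);

	if (ret < 0)
		return ret;	/* -ENOMEM; nothing to undo here */
	/* e->env_pgdir and e->env_cr3 now describe the kernel portion */
	return 0;
}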
Example #2
//
// Frees env e and all memory it uses.
//
void
env_free(struct Env *e)
{
	pde_t *pgdir;
	pte_t *pt;
	uint32_t pdeno, pteno;
	physaddr_t pa;

	// If freeing the current environment, switch to kern_pgdir
	// before freeing the page directory, just in case the page
	// gets reused.
	if (e == curenv)
		lcr3(PADDR(kern_pgdir));

	// Note the environment's demise.
	cprintf("[%08x] free env %08x\n", curenv ? curenv->env_id : 0, e->env_id);

	// Flush all mapped pages in the user portion of the address space
	static_assert(UTOP % PTSIZE == 0);
	pgdir = e->env_pgdir;
	for (pdeno = 0; pdeno < PDX(UTOP); pdeno++) {
		// only look at mapped page tables
		if (!(pgdir[pdeno] & PTE_P))
			continue;

		// find the pa and va of the page table
		pt = (pte_t *) KADDR(PTE_ADDR(pgdir[pdeno]));

		// unmap all PTEs in this page table
		for (pteno = 0; pteno <= PTX(~0); pteno++) {
			if (pt[pteno] & PTE_P)
				page_remove(pgdir, PGADDR(pdeno, pteno, 0));
		}

		// free the page table itself
		pgdir[pdeno] = 0;
		page_decref(kva2page(pt));
	}

	// free the page directory
	e->env_pgdir = 0;
	page_decref(kva2page(pgdir));

	// return the environment to the free list
	e->env_status = ENV_FREE;
	e->env_link = env_free_list;
	env_free_list = e;
}
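
A short note on the address arithmetic in the inner loop above, assuming the standard JOS paging macros (PDXSHIFT = 22, PTXSHIFT = 12):

/* PGADDR(pdeno, pteno, 0) == (pdeno << PDXSHIFT) | (pteno << PTXSHIFT),
 * i.e. the user virtual address mapped by pt[pteno].  Passing that VA to
 * page_remove() lets it decref the underlying physical page and invalidate
 * the TLB entry for exactly the mapping being torn down. */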
Example #3
/**
 * The hook called before 'do_execve' successfully returns.
 *     What we need to do here is:
 *       1. create a new host container if the process doesn't have one yet;
 *       2. create the stack for the syscall stub;
 *       3. unmap the umUcore kernel area in the child and erase the corresponding page table entries;
 *       4. copy the arguments to the child's user stack and free the kernel's copy;
 *       5. call 'userspace'.
 *     Once everything is done, the current thread becomes a monitor thread forever.
 * @param argc the number of arguments
 * @param kargv the copy of the arguments in the kernel
 */
int do_execve_arch_hook(int argc, char **kargv)
{
	if (current->arch.host == NULL) {
		current->arch.host = kmalloc(sizeof(struct host_container));
		if (current->arch.host == NULL)
			return -1;
	}

	void *stub_stack = boot_alloc_page();
	if (stub_stack == NULL)
		goto free_host;
	int ret = start_userspace(stub_stack);
	if (ret <= 0)
		goto free_stub_stack;

	current->arch.host->stub_stack = stub_stack;
	current->arch.host->host_pid = ret;
	current->arch.host->nr_threads = 1;

	/* unmap kernel area */
	if (host_munmap(current, (void *)KERNBASE, KERNTOP - KERNBASE) < 0)
		panic("unmap kernel area failed\n");

	/* erase kernel maps in the page table */
	int valid_size = KERNBASE / PTSIZE * sizeof(pte_t);
	memset((void *)((int)(current->mm->pgdir) + valid_size),
	       0, PGSIZE - valid_size);

	/* Copy arguments */
	current->arch.regs.is_user = 1;
	uintptr_t stacktop = USTACKTOP - argc * PGSIZE;
	char **uargv = (char **)(stacktop - argc * sizeof(char *));
	int i, addr;
	for (i = 0; i < argc; i++) {
		addr = stacktop + i * PGSIZE;
		assert(copy_to_user
		       (current->mm, uargv + i, &addr, sizeof(int)));
		assert(copy_to_user
		       (current->mm, (void *)addr, kargv[i],
			strlen(kargv[i]) + 1));
	}
	stacktop = (uintptr_t) uargv - sizeof(int);
	copy_to_user(current->mm, (void *)stacktop, &argc, sizeof(int));

	/* The copy of the args in the kernel will never be used again and we will not return,
	 *     so free them here.
	 */
	while (argc > 0) {
		kfree(kargv[--argc]);
	}
	userspace(&(current->arch.regs));
	/* should never come here */

free_stub_stack:
	free_page(kva2page(stub_stack));
free_host:
	kfree(current->arch.host);
	current->arch.host = NULL;
	return -1;
}
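
The argument-copy block above lays the strings and pointers out on the user stack as sketched below; the picture is illustrative only and assumes argc == 2.

/* User-stack layout built above, for argc == 2 (one page per argument):
 *
 *   [USTACKTOP - 1*PGSIZE, USTACKTOP)           kargv[1] string
 *   [USTACKTOP - 2*PGSIZE, USTACKTOP - PGSIZE)  kargv[0] string  (== initial stacktop)
 *   stacktop - 2*sizeof(char *)                 uargv[0..1], pointers to the pages above
 *   uargv - sizeof(int)                         argc, written by the final copy_to_user()
 */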
Example #4
static void
shmn_destroy(shmn_t *shmn) {
    int i;
    for (i = 0; i < SHMN_NENTRY; i ++) {
        shmem_remove_entry_pte(shmn->entry + i);
    }
    free_page(kva2page(shmn->entry));
    kfree(shmn);
}
Example #5
/* Helper, allocates a free page. */
static struct page *get_a_free_page(void)
{
	void *addr;

	addr = kpages_alloc(PGSIZE, MEM_ATOMIC);
	if (!addr)
		return NULL;
	return kva2page(addr);
}
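
A hypothetical usage sketch for the helper above, assuming page2kva() is the inverse of kva2page() as in the other examples; the function name and error handling are illustrative.

static int scratch_page_demo(void)
{
	struct page *pg = get_a_free_page();

	if (!pg)
		return -ENOMEM;
	memset(page2kva(pg), 0, PGSIZE);	/* touch it through its kernel VA */
	page_decref(pg);			/* drop the reference, as in Example #6 */
	return 0;
}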
Example #6
void
vector_clear(Vector *v)
{
    assert(v->elem != NULL);
   
    page_decref(kva2page(v->elem));
    
    v->elem = NULL;
    v->size = v->capacity = 0;
}
Example #7
/* Returns true if uva and kva both resolve to the same phys addr.  If uva is
 * unmapped, it will return FALSE.  This is probably what you want, since after
 * all uva isn't kva. */
bool uva_is_kva(struct proc *p, void *uva, void *kva)
{
	struct page *u_page;
	assert(kva);				/* catch bugs */
	/* Check offsets first */
	if (PGOFF(uva) != PGOFF(kva))
		return FALSE;
	/* Check to see if it is the same physical page */
	u_page = page_lookup(p->env_pgdir, uva, 0);
	if (!u_page)
		return FALSE;
	return (kva2page(kva) == u_page) ? TRUE : FALSE;
}
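
A hypothetical check combining the helper above with the mappings set up in Example #1 (UINFO and p->procinfo come from there):

/* Illustrative only: after env_setup_vm(), the kernel's first procinfo page
 * at p->procinfo should alias the user's view of it at UINFO. */
assert(uva_is_kva(p, (void*)UINFO, p->procinfo));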
Example #8
/**
 * The hook called every time 'de_thread' is called, to perform arch-related operations.
 *     In umUcore, this means checking whether the container process has no threads left
 *     and releasing it if so.
 * Note: For do_execve, a container may be destroyed and another one created.
 *       We should reuse the old one directly in the future.
 * @param proc the PCB of the process to be detached
 */
void de_thread_arch_hook(struct proc_struct *proc)
{
	if (proc->arch.host != NULL) {
		assert(proc->arch.host->nr_threads > 0);
		proc->arch.host->nr_threads--;
		if (proc->arch.host->nr_threads == 0) {
			free_page(kva2page(current->arch.host->stub_stack));
			kill_ptraced_process(current->arch.host->host_pid, 1);
			kfree(current->arch.host);
			current->arch.host = NULL;
		}
	}
}
Example #9
// put_pgdir - free the memory space of PDT
static void
put_pgdir(struct mm_struct *mm) {
    assert(mm->pgdir_alloc_addr);
    free_pages(kva2page(mm->pgdir_alloc_addr), 8);
}
Example #10
// put_pgdir - free the memory space of PDT
static void
put_pgdir(struct mm_struct *mm) {
    free_page(kva2page(mm->pgdir));
}
Example #11
// put_kstack - free the memory space of process kernel stack
static void
put_kstack(struct proc_struct *proc) {
    free_pages(kva2page((void *)(proc->kstack)), KSTACKPAGE);
}
Example #12
File: swap.c  Project: jefjin/ucore
static void
check_mm_swap(void) {
    size_t nr_free_pages_store = nr_free_pages();
    size_t slab_allocated_store = slab_allocated();

    int ret, i, j;
    for (i = 0; i < max_swap_offset; i ++) {
        assert(mem_map[i] == SWAP_UNUSED);
    }

    extern struct mm_struct *check_mm_struct;
    assert(check_mm_struct == NULL);

    // step1: check mm_map

    struct mm_struct *mm0 = mm_create(), *mm1;
    assert(mm0 != NULL && list_empty(&(mm0->mmap_list)));

    uintptr_t addr0, addr1;

    addr0 = 0;
    do {
        ret = mm_map(mm0, addr0, PTSIZE, 0, NULL);
        assert(ret == (USER_ACCESS(addr0, addr0 + PTSIZE)) ? 0 : -E_INVAL);
        addr0 += PTSIZE;
    } while (addr0 != 0);

    addr0 = 0;
    for (i = 0; i < 1024; i ++, addr0 += PTSIZE) {
        ret = mm_map(mm0, addr0, PGSIZE, 0, NULL);
        assert(ret == -E_INVAL);
    }

    mm_destroy(mm0);


    mm0 = mm_create();
    assert(mm0 != NULL && list_empty(&(mm0->mmap_list)));

    addr0 = 0, i = 0;
    do {
        ret = mm_map(mm0, addr0, PTSIZE - PGSIZE, 0, NULL);
        assert(ret == (USER_ACCESS(addr0, addr0 + PTSIZE)) ? 0 : -E_INVAL);
        if (ret == 0) {
            i ++;
        }
        addr0 += PTSIZE;
    } while (addr0 != 0);

    addr0 = 0, j = 0;
    do {
        addr0 += PTSIZE - PGSIZE;
        ret = mm_map(mm0, addr0, PGSIZE, 0, NULL);
        assert(ret == (USER_ACCESS(addr0, addr0 + PGSIZE)) ? 0 : -E_INVAL);
        if (ret == 0) {
            j ++;
        }
        addr0 += PGSIZE;
    } while (addr0 != 0);

    assert(j + 1 >= i);

    mm_destroy(mm0);

    assert(nr_free_pages_store == nr_free_pages());
    assert(slab_allocated_store == slab_allocated());

    cprintf("check_mm_swap: step1, mm_map ok.\n");

    // step2: check page fault

    mm0 = mm_create();
    assert(mm0 != NULL && list_empty(&(mm0->mmap_list)));

    // setup page table

    struct Page *page = alloc_page();
    assert(page != NULL);
    pde_t *pgdir = page2kva(page);
    memcpy(pgdir, boot_pgdir, PGSIZE);
    pgdir[PDX(VPT)] = PADDR(pgdir) | PTE_P | PTE_W;

    // prepare for page fault

    mm0->pgdir = pgdir;
    check_mm_struct = mm0;
    lcr3(PADDR(mm0->pgdir));

    uint32_t vm_flags = VM_WRITE | VM_READ;
    struct vma_struct *vma;

    addr0 = 0;
    do {
        if ((ret = mm_map(mm0, addr0, PTSIZE, vm_flags, &vma)) == 0) {
            break;
        }
        addr0 += PTSIZE;
    } while (addr0 != 0);

    assert(ret == 0 && addr0 != 0 && mm0->map_count == 1);
    assert(vma->vm_start == addr0 && vma->vm_end == addr0 + PTSIZE);

    // check pte entry

    pte_t *ptep;
    for (addr1 = addr0; addr1 < addr0 + PTSIZE; addr1 += PGSIZE) {
        ptep = get_pte(pgdir, addr1, 0);
        assert(ptep == NULL);
    }

    memset((void *)addr0, 0xEF, PGSIZE * 2);
    ptep = get_pte(pgdir, addr0, 0);
    assert(ptep != NULL && (*ptep & PTE_P));
    ptep = get_pte(pgdir, addr0 + PGSIZE, 0);
    assert(ptep != NULL && (*ptep & PTE_P));

    ret = mm_unmap(mm0, - PTSIZE, PTSIZE);
    assert(ret == -E_INVAL);
    ret = mm_unmap(mm0, addr0 + PTSIZE, PGSIZE);
    assert(ret == 0);

    addr1 = addr0 + PTSIZE / 2;
    ret = mm_unmap(mm0, addr1, PGSIZE);
    assert(ret == 0 && mm0->map_count == 2);

    ret = mm_unmap(mm0, addr1 + 2 * PGSIZE, PGSIZE * 4);
    assert(ret == 0 && mm0->map_count == 3);

    ret = mm_map(mm0, addr1, PGSIZE * 6, 0, NULL);
    assert(ret == -E_INVAL);
    ret = mm_map(mm0, addr1, PGSIZE, 0, NULL);
    assert(ret == 0 && mm0->map_count == 4);
    ret = mm_map(mm0, addr1 + 2 * PGSIZE, PGSIZE * 4, 0, NULL);
    assert(ret == 0 && mm0->map_count == 5);

    ret = mm_unmap(mm0, addr1 + PGSIZE / 2, PTSIZE / 2 - PGSIZE);
    assert(ret == 0 && mm0->map_count == 1);

    addr1 = addr0 + PGSIZE;
    for (i = 0; i < PGSIZE; i ++) {
        assert(*(char *)(addr1 + i) == (char)0xEF);
    }

    ret = mm_unmap(mm0, addr1 + PGSIZE / 2, PGSIZE / 4);
    assert(ret == 0 && mm0->map_count == 2);
    ptep = get_pte(pgdir, addr0, 0);
    assert(ptep != NULL && (*ptep & PTE_P));
    ptep = get_pte(pgdir, addr0 + PGSIZE, 0);
    assert(ptep != NULL && *ptep == 0);

    ret = mm_map(mm0, addr1, PGSIZE, vm_flags, NULL);
    memset((void *)addr1, 0x88, PGSIZE);
    assert(*(char *)addr1 == (char)0x88 && mm0->map_count == 3);

    for (i = 1; i < 16; i += 2) {
        ret = mm_unmap(mm0, addr0 + PGSIZE * i, PGSIZE);
        assert(ret == 0);
        if (i < 8) {
            ret = mm_map(mm0, addr0 + PGSIZE * i, PGSIZE, 0, NULL);
            assert(ret == 0);
        }
    }
    assert(mm0->map_count == 13);

    ret = mm_unmap(mm0, addr0 + PGSIZE * 2, PTSIZE - PGSIZE * 2);
    assert(ret == 0 && mm0->map_count == 2);

    ret = mm_unmap(mm0, addr0, PGSIZE * 2);
    assert(ret == 0 && mm0->map_count == 0);

    cprintf("check_mm_swap: step2, mm_unmap ok.\n");

    // step3: check exit_mmap

    ret = mm_map(mm0, addr0, PTSIZE, vm_flags, NULL);
    assert(ret == 0);

    for (i = 0, addr1 = addr0; i < 4; i ++, addr1 += PGSIZE) {
        *(char *)addr1 = (char)0xFF;
    }

    exit_mmap(mm0);
    for (i = 0; i < PDX(KERNBASE); i ++) {
        assert(pgdir[i] == 0);
    }

    cprintf("check_mm_swap: step3, exit_mmap ok.\n");

    // step4: check dup_mmap

    for (i = 0; i < max_swap_offset; i ++) {
        assert(mem_map[i] == SWAP_UNUSED);
    }

    ret = mm_map(mm0, addr0, PTSIZE, vm_flags, NULL);
    assert(ret != 0);

    addr1 = addr0;
    for (i = 0; i < 4; i ++, addr1 += PGSIZE) {
        *(char *)addr1 = (char)(i * i);
    }

    ret = 0;
    ret += swap_out_mm(mm0, 10);
    ret += swap_out_mm(mm0, 10);
    assert(ret == 4);

    for (; i < 8; i ++, addr1 += PGSIZE) {
        *(char *)addr1 = (char)(i * i);
    }

    // setup mm1

    mm1 = mm_create();
    assert(mm1 != NULL);

    page = alloc_page();
    assert(page != NULL);
    pgdir = page2kva(page);
    memcpy(pgdir, boot_pgdir, PGSIZE);
    pgdir[PDX(VPT)] = PADDR(pgdir) | PTE_P | PTE_W;
    mm1->pgdir = pgdir;

    ret = dup_mmap(mm1, mm0);
    assert(ret == 0);

    // switch to mm1

    check_mm_struct = mm1;
    lcr3(PADDR(mm1->pgdir));

    addr1 = addr0;
    for (i = 0; i < 8; i ++, addr1 += PGSIZE) {
        assert(*(char *)addr1 == (char)(i * i));
        *(char *)addr1 = (char)0x88;
    }

    // switch to mm0

    check_mm_struct = mm0;
    lcr3(PADDR(mm0->pgdir));

    addr1 = addr0;
    for (i = 0; i < 8; i ++, addr1 += PGSIZE) {
        assert(*(char *)addr1 == (char)(i * i));
    }

    // switch to boot_cr3

    check_mm_struct = NULL;
    lcr3(boot_cr3);

    // free memory

    exit_mmap(mm0);
    exit_mmap(mm1);

    free_page(kva2page(mm0->pgdir));
    mm_destroy(mm0);
    free_page(kva2page(mm1->pgdir));
    mm_destroy(mm1);

    cprintf("check_mm_swap: step4, dup_mmap ok.\n");

    refill_inactive_scan();
    page_launder();
    for (i = 0; i < max_swap_offset; i ++) {
        assert(mem_map[i] == SWAP_UNUSED);
    }

    assert(nr_free_pages_store == nr_free_pages());
    assert(slab_allocated_store == slab_allocated());

    cprintf("check_mm_swap() succeeded.\n");
}
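
The pgdir[PDX(VPT)] = PADDR(pgdir) | PTE_P | PTE_W; assignment used in the test above installs ucore's recursive page-directory mapping. A minimal sketch of what that provides, assuming the usual VPT and PPN macros:

/* With the self-mapping installed, all page tables appear as one flat
 * array of PTEs starting at VPT, so the PTE for a linear address la can
 * be read without walking physical addresses by hand:
 *
 *     pte_t pte = ((pte_t *)VPT)[PPN(la)];
 */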
Example #13
File: swap.c  Project: jefjin/ucore
static void
check_mm_shm_swap(void) {
    size_t nr_free_pages_store = nr_free_pages();
    size_t slab_allocated_store = slab_allocated();

    int ret, i;
    for (i = 0; i < max_swap_offset; i ++) {
        assert(mem_map[i] == SWAP_UNUSED);
    }

    extern struct mm_struct *check_mm_struct;
    assert(check_mm_struct == NULL);

    struct mm_struct *mm0 = mm_create(), *mm1;
    assert(mm0 != NULL && list_empty(&(mm0->mmap_list)));

    struct Page *page = alloc_page();
    assert(page != NULL);
    pde_t *pgdir = page2kva(page);
    memcpy(pgdir, boot_pgdir, PGSIZE);
    pgdir[PDX(VPT)] = PADDR(pgdir) | PTE_P | PTE_W;

    mm0->pgdir = pgdir;
    check_mm_struct = mm0;
    lcr3(PADDR(mm0->pgdir));

    uint32_t vm_flags = VM_WRITE | VM_READ;

    uintptr_t addr0, addr1;

    addr0 = 0;
    do {
        if ((ret = mm_map(mm0, addr0, PTSIZE * 4, vm_flags, NULL)) == 0) {
            break;
        }
        addr0 += PTSIZE;
    } while (addr0 != 0);

    assert(ret == 0 && addr0 != 0 && mm0->map_count == 1);

    ret = mm_unmap(mm0, addr0, PTSIZE * 4);
    assert(ret == 0 && mm0->map_count == 0);

    struct shmem_struct *shmem = shmem_create(PTSIZE * 2);
    assert(shmem != NULL && shmem_ref(shmem) == 0);

    // step1: check share memory

    struct vma_struct *vma;

    addr1 = addr0 + PTSIZE * 2;
    ret = mm_map_shmem(mm0, addr0, vm_flags, shmem, &vma);
    assert(ret == 0);
    assert((vma->vm_flags & VM_SHARE) && vma->shmem == shmem && shmem_ref(shmem) == 1);
    ret = mm_map_shmem(mm0, addr1, vm_flags, shmem, &vma);
    assert(ret == 0);
    assert((vma->vm_flags & VM_SHARE) && vma->shmem == shmem && shmem_ref(shmem) == 2);

    // page fault

    for (i = 0; i < 4; i ++) {
        *(char *)(addr0 + i * PGSIZE) = (char)(i * i);
    }
    for (i = 0; i < 4; i ++) {
        assert(*(char *)(addr1 + i * PGSIZE) == (char)(i * i));
    }

    for (i = 0; i < 4; i ++) {
        *(char *)(addr1 + i * PGSIZE) = (char)(- i * i);
    }
    for (i = 0; i < 4; i ++) {
        assert(*(char *)(addr1 + i * PGSIZE) == (char)(- i * i));
    }

    // check swap

    ret = swap_out_mm(mm0, 8) + swap_out_mm(mm0, 8);
    assert(ret == 8 && nr_active_pages == 4 && nr_inactive_pages == 0);

    refill_inactive_scan();
    assert(nr_active_pages == 0 && nr_inactive_pages == 4);

    // write & read again

    memset((void *)addr0, 0x77, PGSIZE);
    for (i = 0; i < PGSIZE; i ++) {
        assert(*(char *)(addr1 + i) == (char)0x77);
    }

    // check unmap

    ret = mm_unmap(mm0, addr1, PGSIZE * 4);
    assert(ret == 0);

    addr0 += 4 * PGSIZE, addr1 += 4 * PGSIZE;
    *(char *)(addr0) = (char)(0xDC);
    assert(*(char *)(addr1) == (char)(0xDC));
    *(char *)(addr1 + PTSIZE) = (char)(0xDC);
    assert(*(char *)(addr0 + PTSIZE) == (char)(0xDC));

    cprintf("check_mm_shm_swap: step1, share memory ok.\n");

    // setup mm1

    mm1 = mm_create();
    assert(mm1 != NULL);


    page = alloc_page();
    assert(page != NULL);
    pgdir = page2kva(page);
    memcpy(pgdir, boot_pgdir, PGSIZE);
    pgdir[PDX(VPT)] = PADDR(pgdir) | PTE_P | PTE_W;
    mm1->pgdir = pgdir;


    ret = dup_mmap(mm1, mm0);
    assert(ret == 0 && shmem_ref(shmem) == 4);

    // switch to mm1

    check_mm_struct = mm1;
    lcr3(PADDR(mm1->pgdir));

    for (i = 0; i < 4; i ++) {
        *(char *)(addr0 + i * PGSIZE) = (char)(0x57 + i);
    }
    for (i = 0; i < 4; i ++) {
        assert(*(char *)(addr1 + i * PGSIZE) == (char)(0x57 + i));
    }

    check_mm_struct = mm0;
    lcr3(PADDR(mm0->pgdir));

    for (i = 0; i < 4; i ++) {
        assert(*(char *)(addr0 + i * PGSIZE) == (char)(0x57 + i));
        assert(*(char *)(addr1 + i * PGSIZE) == (char)(0x57 + i));
    }

    swap_out_mm(mm1, 4);
    exit_mmap(mm1);

    free_page(kva2page(mm1->pgdir));
    mm_destroy(mm1);

    assert(shmem_ref(shmem) == 2);

    cprintf("check_mm_shm_swap: step2, dup_mmap ok.\n");

    // free memory

    check_mm_struct = NULL;
    lcr3(boot_cr3);

    exit_mmap(mm0);
    free_page(kva2page(mm0->pgdir));
    mm_destroy(mm0);

    refill_inactive_scan();
    page_launder();
    for (i = 0; i < max_swap_offset; i ++) {
        assert(mem_map[i] == SWAP_UNUSED);
    }

    assert(nr_free_pages_store == nr_free_pages());
    assert(slab_allocated_store == slab_allocated());

    cprintf("check_mm_shm_swap() succeeded.\n");
}
Example #14
/**
 * Make a copy of the current thread/process, giving the parent the child's pid and the child 0.
 *     This is called in do_fork after all structures in the child's PCB are ready.
 * @param clone_flags we need this to determine whether we're creating a 'thread' or a 'process'
 * @param proc the PCB of the child
 * @param user_stack the stack used by the child
 * @param tf the struct containing 'fn' and 'arg'
 * @return 0 on success, or -1 otherwise
 */
int
copy_thread(uint32_t clone_flags, struct proc_struct *proc,
	    uintptr_t user_stack, struct trapframe *tf)
{
	int pid;
	void *stub_stack;

	/* If 'do_fork' is called by the kernel, 'current' should be idle,
	 *     and we're actually creating a kernel thread.
	 * If 'do_fork' is called by the user (i.e. a syscall), 'current' is a user PCB,
	 *     and we need to create another user process.
	 */
	if (RUN_AS_USER) {
		if (clone_flags & CLONE_VM) {
			/* Use current host process as its container */
			proc->arch.host = current->arch.host;
			proc->arch.host->nr_threads++;
		} else {
			/* Create a new child process */
			proc->arch.host =
			    kmalloc(sizeof(struct host_container));
			if (proc->arch.host == NULL)
				goto exit;

			stub_stack = boot_alloc_page();
			if (stub_stack == NULL)
				goto exit_free_host;
			pid = start_userspace(stub_stack);

			if (pid < 0)
				goto exit_free_stub;

			/* Initialize host process descriptor */
			proc->arch.forking = 0;
			proc->arch.host->host_pid = pid;
			proc->arch.host->nr_threads = 1;
			proc->arch.host->stub_stack =
			    (struct stub_stack *)stub_stack;
			/* unmap kernel area. */
			if (host_munmap
			    (proc, (void *)KERNBASE, KERNTOP - KERNBASE) < 0)
				panic("unmap kernel area failed \n");
		}
		/* The child should have the same regs as the parent's. */
		memcpy(&(proc->arch.regs.regs), &(current->arch.regs.regs),
		       sizeof(struct user_regs_struct));

		/* make the child return 0 for the syscall */
		proc->arch.regs.regs.eax = 0;

		proc->arch.regs.regs.esp = user_stack;

		/* The current thread will run in 'userspace' */
		proc->context.switch_buf->__ebx = (uint32_t) userspace;
		proc->context.switch_buf->__ebp =
		    (uint32_t) & (proc->arch.regs);
	} else {
		/* For kernel thread */
		proc->context.switch_buf->__ebx = (uint32_t) (tf->fn);
		proc->context.switch_buf->__ebp = (uint32_t) (tf->arg);
	}
	/* All new threads/processes start running from 'kernel_thread_entry',
	 *     since 'processes' here actually mean 'monitor threads' to the kernel.
	 */
	proc->context.switch_buf->__eip = (uint32_t) kernel_thread_entry;
	proc->context.switch_buf->__esp = proc->kstack + KSTACKSIZE;

	return 0;

exit_free_stub:
	free_page(kva2page(stub_stack));
exit_free_host:
	kfree(proc->arch.host);
exit:
	return -1;
}