Example #1
File: mm.c Project: feng-lei/mario
int dup_mmap(struct task_struct *t)
{
	struct vm_area_struct *vma, *tmp, **p;

	p = &t->mm->mmap;
	*p = NULL;
	for (vma = current->mm->mmap; vma; vma = vma->vm_next) {
		tmp = (struct vm_area_struct *)kmalloc(sizeof(struct vm_area_struct));
		if (!tmp) {
			exit_mmap(t);
			return -ENOMEM;
		}
		*tmp = *vma;
		tmp->vm_mm = t->mm;
		tmp->vm_next = NULL;
		if (tmp->vm_inode)
			iref(tmp->vm_inode);
		/* a shared area? */
		if (tmp->vm_flags & VM_SHARED) {
			tmp->vm_next_share->vm_prev_share = tmp;
			vma->vm_next_share = tmp;
			tmp->vm_prev_share = vma;
		} else {
			tmp->vm_next_share = tmp->vm_prev_share = tmp;
		}
		if (tmp->vm_ops && tmp->vm_ops->open)
			tmp->vm_ops->open(tmp);
		*p = tmp;
		p = &tmp->vm_next;
	}
	return 0;
}
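The mario variant above cleans up after itself: when kmalloc fails mid-copy, it calls exit_mmap(t) on the partially built list before returning -ENOMEM. A minimal caller sketch that relies on that contract (hypothetical, not taken from the feng-lei/mario source):

/* Hypothetical fork-path caller: dup_mmap() already tears down the
 * partially copied vma list via exit_mmap() on failure, so the caller
 * only has to propagate the error code. */
static int copy_address_space(struct task_struct *child)
{
	int err = dup_mmap(child);
	if (err)
		return err;	/* child->mm->mmap was freed inside dup_mmap() */
	return 0;
}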
Example #2
// copy_mm - process "proc" duplicates OR shares process "current"'s mm according to clone_flags
//         - if clone_flags & CLONE_VM, then "share"; else "duplicate"
static int
copy_mm(uint32_t clone_flags, struct proc_struct *proc) {
    struct mm_struct *mm, *oldmm = current->mm;

    /* current is a kernel thread */
    if (oldmm == NULL) {
        return 0;
    }
    if (clone_flags & CLONE_VM) {
        mm = oldmm;
        goto good_mm;
    }

    int ret = -E_NO_MEM;
    if ((mm = mm_create()) == NULL) {
        goto bad_mm;
    }
    if (setup_pgdir(mm) != 0) {
        goto bad_pgdir_cleanup_mm;
    }

    lock_mm(oldmm);
    {
        ret = dup_mmap(mm, oldmm);
    }
    unlock_mm(oldmm);

    if (ret != 0) {
        goto bad_dup_cleanup_mmap;
    }

good_mm:
    if (mm != oldmm) {
        mm->brk_start = oldmm->brk_start;
        mm->brk = oldmm->brk;
        bool intr_flag;
        local_intr_save(intr_flag);
        {
            list_add(&(proc_mm_list), &(mm->proc_mm_link));
        }
        local_intr_restore(intr_flag);
    }
    mm_count_inc(mm);
    proc->mm = mm;
    set_pgdir(proc, mm->pgdir);
    return 0;
bad_dup_cleanup_mmap:
    exit_mmap(mm);
    put_pgdir(mm);
bad_pgdir_cleanup_mm:
    mm_destroy(mm);
bad_mm:
    return ret;
}
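As the header comment says, CLONE_VM shares the parent's mm and anything else duplicates it. A hedged sketch of how a ucore-style fork path might drive copy_mm (the helper itself is assumed; only copy_mm and its cleanup contract come from the example above):

/* Assumed fork-path fragment: copy_mm() releases its own state on failure,
 * so a failing call simply aborts the fork with the returned error code. */
static int fork_copy_mm(uint32_t clone_flags, struct proc_struct *proc)
{
    int ret = copy_mm(clone_flags, proc);
    if (ret != 0) {
        return ret;              /* nothing to undo here */
    }
    /* with CLONE_VM, proc->mm == current->mm and only the refcount grew;
     * otherwise proc->mm is a fresh duplicate with its own page directory */
    return 0;
}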
Example #3
/*
 * Decrement the use count and release all resources for an mm.
 */
void mmput(struct mm_struct *mm)
{
	if (atomic_dec_and_lock(&mm->mm_users, &mmlist_lock)) {
		list_del(&mm->mmlist);
		mmlist_nr--;
		spin_unlock(&mmlist_lock);
		exit_aio(mm);
		exit_mmap(mm);
		mmdrop(mm);
	}
}
Example #4
/*
 * Decrement the use count and release all resources for an mm.
 */
void mmput(struct mm_struct *mm)
{
	if (atomic_dec_and_test(&mm->mm_users)) {
		exit_aio(mm);
		exit_mmap(mm);
		if (!list_empty(&mm->mmlist)) {
			spin_lock(&mmlist_lock);
			list_del(&mm->mmlist);
			spin_unlock(&mmlist_lock);
		}
		put_swap_token(mm);
		mmdrop(mm);
	}
}
Example #5
/*
 * Decrement the use count and release all resources for an mm.
 */
void mmput(struct mm_struct *mm)
{
	might_sleep();

	if (atomic_dec_and_test(&mm->mm_users)) {
		exit_aio(mm);
		ksm_exit(mm);
		exit_mmap(mm);
		set_mm_exe_file(mm, NULL);
		if (!list_empty(&mm->mmlist)) {
			spin_lock(&mmlist_lock);
			list_del(&mm->mmlist);
			spin_unlock(&mmlist_lock);
		}
		put_swap_token(mm);
		mmdrop(mm);
	}
}
Example #6
/*
 * Decrement the use count and release all resources for an mm.
 */
void mmput(struct mm_struct *mm)
{
	might_sleep();

	if (atomic_dec_and_test(&mm->mm_users)) {
		exit_aio(mm);
		ksm_exit(mm);
		khugepaged_exit(mm); /* must run before exit_mmap */
		exit_mmap(mm);
		set_mm_exe_file(mm, NULL);
		if (!list_empty(&mm->mmlist)) {
			spin_lock(&mmlist_lock);
			list_del(&mm->mmlist);
			spin_unlock(&mmlist_lock);
		}
		if (mm->binfmt)
			module_put(mm->binfmt->module);
		mmdrop(mm);
	}
}
Example #7
static inline void __exit_mm(struct task_struct * tsk)
{
	struct mm_struct * mm = tsk->mm;

	/* Set us up to use the kernel mm state */
	if (mm != &init_mm) {
		flush_cache_mm(mm);
		flush_tlb_mm(mm);
		tsk->mm = &init_mm;
		tsk->swappable = 0;
		SET_PAGE_DIR(tsk, swapper_pg_dir);

		/* free the old state - not used any more */
		if (!--mm->count) {
			exit_mmap(mm);
			free_page_tables(mm);
			kfree(mm);
		}
	}
}
Example #8
int mmput(struct mm_struct *mm)
{
	int mm_freed = 0;
	might_sleep();

	if (atomic_dec_and_test(&mm->mm_users)) {
		exit_aio(mm);
		ksm_exit(mm);
		khugepaged_exit(mm); 
		exit_mmap(mm);
		set_mm_exe_file(mm, NULL);
		if (!list_empty(&mm->mmlist)) {
			spin_lock(&mmlist_lock);
			list_del(&mm->mmlist);
			spin_unlock(&mmlist_lock);
		}
		if (mm->binfmt)
			module_put(mm->binfmt->module);
		mmdrop(mm);
		mm_freed = 1;
	}
	return mm_freed;
}
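The mmput() variants above all share one contract: the caller holds a reference on mm->mm_users and must drop it exactly once, and the last dropper pays for exit_mmap() and mmdrop(). A hedged usage sketch built on the mainline get_task_mm() helper (the surrounding function is hypothetical; only get_task_mm() and mmput() are real kernel APIs):

/* Hypothetical helper: take a reference on another task's mm, read a field,
 * and release it.  get_task_mm() bumps mm->mm_users and returns NULL for
 * kernel threads; mmput() drops the reference and may free the mm. */
static unsigned long peek_task_vm_size(struct task_struct *task)
{
	struct mm_struct *mm = get_task_mm(task);
	unsigned long pages = 0;

	if (mm) {
		pages = mm->total_vm;	/* safe: we hold an mm_users reference */
		mmput(mm);		/* last dropper runs exit_mmap()/mmdrop() */
	}
	return pages;
}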
Example #9
int
do_execve(const char *filename, const char **argv, const char **envp) {
    static_assert(EXEC_MAX_ARG_LEN >= FS_MAX_FPATH_LEN);
    
    struct mm_struct *mm = current->mm;

    char local_name[PROC_NAME_LEN + 1];
    memset(local_name, 0, sizeof(local_name));

    char *kargv[EXEC_MAX_ARG_NUM], *kenvp[EXEC_MAX_ENV_NUM];
    const char *path;

    int ret = -E_INVAL;
    lock_mm(mm);
#if 0
    if (name == NULL) {
        snprintf(local_name, sizeof(local_name), "<null> %d", current->pid);
    }
    else {
        if (!copy_string(mm, local_name, name, sizeof(local_name))) {
            unlock_mm(mm);
            return ret;
        }
    }
#endif
    snprintf(local_name, sizeof(local_name), "<null> %d", current->pid);

    int argc = 0, envc = 0;
    if ((ret = copy_kargv(mm, kargv, argv, EXEC_MAX_ARG_NUM, &argc)) != 0) {
      unlock_mm(mm);
      return ret;
    }
    if ((ret = copy_kargv(mm, kenvp, envp, EXEC_MAX_ENV_NUM, &envc)) != 0) {
      unlock_mm(mm);
      put_kargv(argc, kargv);
      return ret;
    }

#if 0
    int i;
    kprintf("## fn %s\n", filename);
    kprintf("## argc %d\n", argc);
    for(i=0;i<argc;i++)
      kprintf("## %08x %s\n", kargv[i], kargv[i]);
    kprintf("## envc %d\n", envc);
    for(i=0;i<envc;i++)
      kprintf("## %08x %s\n", kenvp[i], kenvp[i]);
#endif
    //path = argv[0];
    //copy_from_user (mm, &path, argv, sizeof (char*), 0);
    path = filename;
    unlock_mm(mm);

    /* linux never do this */
    //fs_closeall(current->fs_struct);

    /* sysfile_open will check the first argument path, thus we have to use a user-space pointer, and argv[0] may be incorrect */

    int fd;
    if ((ret = fd = sysfile_open(path, O_RDONLY)) < 0) {
      goto execve_exit;
    }

    if (mm != NULL) {
      mm->lapic = -1;
      mp_set_mm_pagetable(NULL);
      if (mm_count_dec(mm) == 0) {
        exit_mmap(mm);
        put_pgdir(mm);
        bool intr_flag;
        local_intr_save(intr_flag);
        {
          list_del(&(mm->proc_mm_link));
        }
        local_intr_restore(intr_flag);
        mm_destroy(mm);
      }
      current->mm = NULL;
    }
    put_sem_queue(current);

    ret = -E_NO_MEM;
    /* init signal */
    put_sighand(current);
    if ((current->signal_info.sighand = sighand_create()) == NULL) {
      goto execve_exit;
    }
    sighand_count_inc(current->signal_info.sighand);

    put_signal(current);
    if ((current->signal_info.signal = signal_create()) == NULL) {
      goto execve_exit;
    }
    signal_count_inc(current->signal_info.signal);

    if ((current->sem_queue = sem_queue_create()) == NULL) {
      goto execve_exit;
    }
    sem_queue_count_inc(current->sem_queue);

    if ((ret = load_icode(fd, argc, kargv, envc, kenvp)) != 0) {
      goto execve_exit;
    }

    set_proc_name(current, local_name);

    if (do_execve_arch_hook(argc, kargv) < 0) {
      goto execve_exit;
    }

    put_kargv(argc, kargv);
    return 0;

execve_exit:
    put_kargv(argc, kargv);
    put_kargv(envc, kenvp);
/* exec should return -1 if failed */
    //return ret;
    do_exit(ret);
    panic("already exit: %e.\n", ret);
}
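do_execve() above and __do_exit() in Example #11 release the old address space with the same sequence. Distilled into an assumed helper for readability (every name comes from the ucore code shown in these examples; ucore itself inlines this at both call sites):

/* The mm-release idiom shared by do_execve() and __do_exit(). */
static void release_current_mm(struct mm_struct *mm)
{
    if (mm == NULL) {
        return;                      /* kernel thread: nothing to release */
    }
    mm->lapic = -1;
    mp_set_mm_pagetable(NULL);       /* switch this CPU to the kernel page table */
    if (mm_count_dec(mm) == 0) {     /* last reference to this mm */
        exit_mmap(mm);               /* unmap every vma and free its pages */
        put_pgdir(mm);               /* release the page directory */
        bool intr_flag;
        local_intr_save(intr_flag);
        list_del(&(mm->proc_mm_link));
        local_intr_restore(intr_flag);
        mm_destroy(mm);
    }
    current->mm = NULL;
}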
Example #10
static int
load_icode(int fd, int argc, char **kargv, int envc, char **kenvp) {
    assert(argc >= 0 && argc <= EXEC_MAX_ARG_NUM);
    assert(envc >= 0 && envc <= EXEC_MAX_ENV_NUM);
    if (current->mm != NULL) {
        panic("load_icode: current->mm must be empty.\n");
    }

    int ret = -E_NO_MEM;

    struct mm_struct *mm;
    if ((mm = mm_create()) == NULL) {
        goto bad_mm;
    }

    if (setup_pgdir(mm) != 0) {
        goto bad_pgdir_cleanup_mm;
    }

    mm->brk_start = 0;

    struct Page *page;

    struct elfhdr __elf, *elf = &__elf;
    if ((ret = load_icode_read(fd, elf, sizeof(struct elfhdr), 0)) != 0) {
        goto bad_elf_cleanup_pgdir;
    }
	
    if (elf->e_magic != ELF_MAGIC) {
        ret = -E_INVAL_ELF;
        goto bad_elf_cleanup_pgdir;
    }

    struct proghdr __ph, *ph = &__ph;
    uint32_t vm_flags, phnum;
    pte_perm_t perm = 0;
    for (phnum = 0; phnum < elf->e_phnum; phnum ++) {
      off_t phoff = elf->e_phoff + sizeof(struct proghdr) * phnum;
      if ((ret = load_icode_read(fd, ph, sizeof(struct proghdr), phoff)) != 0) {
        goto bad_cleanup_mmap;
      }
      if (ph->p_type != ELF_PT_LOAD) {
        continue ;
      }
      if (ph->p_filesz > ph->p_memsz) {
        ret = -E_INVAL_ELF;
        goto bad_cleanup_mmap;
      }
      vm_flags = 0;
      ptep_set_u_read(&perm);
      if (ph->p_flags & ELF_PF_X) vm_flags |= VM_EXEC;
      if (ph->p_flags & ELF_PF_W) vm_flags |= VM_WRITE;
      if (ph->p_flags & ELF_PF_R) vm_flags |= VM_READ;
      if (vm_flags & VM_WRITE) ptep_set_u_write(&perm);

      if ((ret = mm_map(mm, ph->p_va, ph->p_memsz, vm_flags, NULL)) != 0) {
        goto bad_cleanup_mmap;
      }

      if (mm->brk_start < ph->p_va + ph->p_memsz) {
        mm->brk_start = ph->p_va + ph->p_memsz;
      }

      off_t offset = ph->p_offset;
      size_t off, size;
      uintptr_t start = ph->p_va, end, la = ROUNDDOWN(start, PGSIZE);

      end = ph->p_va + ph->p_filesz;
      while (start < end) {
        if ((page = pgdir_alloc_page(mm->pgdir, la, perm)) == NULL) {
          ret = -E_NO_MEM;
          goto bad_cleanup_mmap;
        }
        off = start - la, size = PGSIZE - off, la += PGSIZE;
        if (end < la) {
          size -= la - end;
        }
        if ((ret = load_icode_read(fd, page2kva(page) + off, size, offset)) != 0) {
          goto bad_cleanup_mmap;
        }
        start += size, offset += size;
      }

      end = ph->p_va + ph->p_memsz;

      if (start < la) {
        /* ph->p_memsz == ph->p_filesz */
        if (start == end) {
          continue ;
        }
        off = start + PGSIZE - la, size = PGSIZE - off;
        if (end < la) {
          size -= la - end;
        }
        memset(page2kva(page) + off, 0, size);
        start += size;
        assert((end < la && start == end) || (end >= la && start == la));
      }

      while (start < end) {
        if ((page = pgdir_alloc_page(mm->pgdir, la, perm)) == NULL) {
          ret = -E_NO_MEM;
          goto bad_cleanup_mmap;
        }
        off = start - la, size = PGSIZE - off, la += PGSIZE;
        if (end < la) {
          size -= la - end;
        }
        memset(page2kva(page) + off, 0, size);
        start += size;
      }
    }
    sysfile_close(fd);

    mm->brk_start = mm->brk = ROUNDUP(mm->brk_start, PGSIZE);

    /* setup user stack */
    vm_flags = VM_READ | VM_WRITE | VM_STACK;
    if ((ret = mm_map(mm, USTACKTOP - USTACKSIZE, USTACKSIZE, vm_flags, NULL)) != 0) {
      goto bad_cleanup_mmap;
    }

    bool intr_flag;
    local_intr_save(intr_flag);
    {
      list_add(&(proc_mm_list), &(mm->proc_mm_link));
    }
    local_intr_restore(intr_flag);
    mm_count_inc(mm);
    current->mm = mm;
    set_pgdir(current, mm->pgdir);
    mm->lapic = pls_read(lapic_id);
    mp_set_mm_pagetable(mm);

    if (init_new_context(current, elf, argc, kargv, envc, kenvp) < 0) {
      goto bad_cleanup_mmap;
    }

    ret = 0;
out:
    return ret;
bad_cleanup_mmap:
    exit_mmap(mm);
bad_elf_cleanup_pgdir:
    put_pgdir(mm);
bad_pgdir_cleanup_mm:
    mm_destroy(mm);
bad_mm:
    goto out;
}
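The per-page copy loop in load_icode() splits each ELF segment at page boundaries; the off/size arithmetic is the subtle part. A self-contained check with assumed numbers (PGSIZE = 4096, a segment starting at 0x1234 whose file contents end at 0x1800) reproduces one iteration:

/* Worked example of the copy-loop arithmetic used above (numbers assumed). */
#include <assert.h>
#define PGSIZE 4096
int main(void)
{
    unsigned long start = 0x1234, end = 0x1800;       /* p_va .. p_va + p_filesz */
    unsigned long la = 0x1000;                        /* ROUNDDOWN(start, PGSIZE) */
    unsigned long off = start - la;                   /* offset into the first page */
    unsigned long size = PGSIZE - off;                /* bytes left in this page */
    la += PGSIZE;
    if (end < la)
        size -= la - end;                             /* trim to the file contents */
    assert(off == 0x234 && size == 0x1800 - 0x1234);  /* exactly p_filesz bytes copied */
    return 0;
}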
Example #11
// __do_exit - cause a thread exit (use do_exit or do_exit_thread instead)
//   1. call exit_mmap & put_pgdir & mm_destroy to free almost all of the process's memory space
//   2. set the process's state to PROC_ZOMBIE, then call wakeup_proc(parent) to ask the parent to reclaim it
//   3. call the scheduler to switch to another process
static int
__do_exit(void) {
    if (current == idleproc) {
        panic("idleproc exit.\n");
    }
    if (current == initproc) {
        panic("initproc exit.\n");
    }

    struct mm_struct *mm = current->mm;
    if (mm != NULL) {
		mm->lapic = -1;
        mp_set_mm_pagetable(NULL);
        if (mm_count_dec(mm) == 0) {
            exit_mmap(mm);
            put_pgdir(mm);
            bool intr_flag;
            local_intr_save(intr_flag);
            {
                list_del(&(mm->proc_mm_link));
            }
            local_intr_restore(intr_flag);
            mm_destroy(mm);
        }
        current->mm = NULL;
    }
    put_sighand(current);
    put_signal(current);
    put_fs(current);
    put_sem_queue(current);
    current->state = PROC_ZOMBIE;

    bool intr_flag;
    struct proc_struct *proc, *parent;
    local_intr_save(intr_flag);
    {
        proc = parent = current->parent;
        do {
            if (proc->wait_state == WT_CHILD) {
                wakeup_proc(proc);
            }
            proc = next_thread(proc);
        } while (proc != parent);

        if ((parent = next_thread(current)) == current) {
            parent = initproc;
        }
        de_thread(current);
        while (current->cptr != NULL) {
            proc = current->cptr;
            current->cptr = proc->optr;

            proc->yptr = NULL;
            if ((proc->optr = parent->cptr) != NULL) {
                parent->cptr->yptr = proc;
            }
            proc->parent = parent;
            parent->cptr = proc;
            if (proc->state == PROC_ZOMBIE) {
                if (parent->wait_state == WT_CHILD) {
                    wakeup_proc(parent);
                }
            }
        }
    }

    wakeup_queue(&(current->event_box.wait_queue), WT_INTERRUPTED, 1);

    local_intr_restore(intr_flag);

    schedule();
    panic("__do_exit will not return!! %d %d.\n", current->pid, current->exit_code);
}
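The header comment says to call do_exit or do_exit_thread rather than __do_exit directly. A hedged sketch of what such a wrapper looks like (the exact ucore wrappers may differ; exit_code is the field __do_exit() reports in its final panic):

/* Assumed wrapper in the spirit of the comment above __do_exit():
 * record the exit code on the current process, then do the real work. */
int do_exit(int error_code)
{
    current->exit_code = error_code;
    return __do_exit();
}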
Example #12
File: swap.c Project: jefjin/ucore
static void
check_mm_swap(void) {
    size_t nr_free_pages_store = nr_free_pages();
    size_t slab_allocated_store = slab_allocated();

    int ret, i, j;
    for (i = 0; i < max_swap_offset; i ++) {
        assert(mem_map[i] == SWAP_UNUSED);
    }

    extern struct mm_struct *check_mm_struct;
    assert(check_mm_struct == NULL);

    // step1: check mm_map

    struct mm_struct *mm0 = mm_create(), *mm1;
    assert(mm0 != NULL && list_empty(&(mm0->mmap_list)));

    uintptr_t addr0, addr1;

    addr0 = 0;
    do {
        ret = mm_map(mm0, addr0, PTSIZE, 0, NULL);
        assert(ret == (USER_ACCESS(addr0, addr0 + PTSIZE) ? 0 : -E_INVAL));
        addr0 += PTSIZE;
    } while (addr0 != 0);

    addr0 = 0;
    for (i = 0; i < 1024; i ++, addr0 += PTSIZE) {
        ret = mm_map(mm0, addr0, PGSIZE, 0, NULL);
        assert(ret == -E_INVAL);
    }

    mm_destroy(mm0);


    mm0 = mm_create();
    assert(mm0 != NULL && list_empty(&(mm0->mmap_list)));

    addr0 = 0, i = 0;
    do {
        ret = mm_map(mm0, addr0, PTSIZE - PGSIZE, 0, NULL);
        assert(ret == (USER_ACCESS(addr0, addr0 + PTSIZE) ? 0 : -E_INVAL));
        if (ret == 0) {
            i ++;
        }
        addr0 += PTSIZE;
    } while (addr0 != 0);

    addr0 = 0, j = 0;
    do {
        addr0 += PTSIZE - PGSIZE;
        ret = mm_map(mm0, addr0, PGSIZE, 0, NULL);
        assert(ret == (USER_ACCESS(addr0, addr0 + PGSIZE) ? 0 : -E_INVAL));
        if (ret == 0) {
            j ++;
        }
        addr0 += PGSIZE;
    } while (addr0 != 0);

    assert(j + 1 >= i);

    mm_destroy(mm0);

    assert(nr_free_pages_store == nr_free_pages());
    assert(slab_allocated_store == slab_allocated());

    cprintf("check_mm_swap: step1, mm_map ok.\n");

    // step2: check page fault

    mm0 = mm_create();
    assert(mm0 != NULL && list_empty(&(mm0->mmap_list)));

    // setup page table

    struct Page *page = alloc_page();
    assert(page != NULL);
    pde_t *pgdir = page2kva(page);
    memcpy(pgdir, boot_pgdir, PGSIZE);
    pgdir[PDX(VPT)] = PADDR(pgdir) | PTE_P | PTE_W;

    // prepare for page fault

    mm0->pgdir = pgdir;
    check_mm_struct = mm0;
    lcr3(PADDR(mm0->pgdir));

    uint32_t vm_flags = VM_WRITE | VM_READ;
    struct vma_struct *vma;

    addr0 = 0;
    do {
        if ((ret = mm_map(mm0, addr0, PTSIZE, vm_flags, &vma)) == 0) {
            break;
        }
        addr0 += PTSIZE;
    } while (addr0 != 0);

    assert(ret == 0 && addr0 != 0 && mm0->map_count == 1);
    assert(vma->vm_start == addr0 && vma->vm_end == addr0 + PTSIZE);

    // check pte entry

    pte_t *ptep;
    for (addr1 = addr0; addr1 < addr0 + PTSIZE; addr1 += PGSIZE) {
        ptep = get_pte(pgdir, addr1, 0);
        assert(ptep == NULL);
    }

    memset((void *)addr0, 0xEF, PGSIZE * 2);
    ptep = get_pte(pgdir, addr0, 0);
    assert(ptep != NULL && (*ptep & PTE_P));
    ptep = get_pte(pgdir, addr0 + PGSIZE, 0);
    assert(ptep != NULL && (*ptep & PTE_P));

    ret = mm_unmap(mm0, - PTSIZE, PTSIZE);
    assert(ret == -E_INVAL);
    ret = mm_unmap(mm0, addr0 + PTSIZE, PGSIZE);
    assert(ret == 0);

    addr1 = addr0 + PTSIZE / 2;
    ret = mm_unmap(mm0, addr1, PGSIZE);
    assert(ret == 0 && mm0->map_count == 2);

    ret = mm_unmap(mm0, addr1 + 2 * PGSIZE, PGSIZE * 4);
    assert(ret == 0 && mm0->map_count == 3);

    ret = mm_map(mm0, addr1, PGSIZE * 6, 0, NULL);
    assert(ret == -E_INVAL);
    ret = mm_map(mm0, addr1, PGSIZE, 0, NULL);
    assert(ret == 0 && mm0->map_count == 4);
    ret = mm_map(mm0, addr1 + 2 * PGSIZE, PGSIZE * 4, 0, NULL);
    assert(ret == 0 && mm0->map_count == 5);

    ret = mm_unmap(mm0, addr1 + PGSIZE / 2, PTSIZE / 2 - PGSIZE);
    assert(ret == 0 && mm0->map_count == 1);

    addr1 = addr0 + PGSIZE;
    for (i = 0; i < PGSIZE; i ++) {
        assert(*(char *)(addr1 + i) == (char)0xEF);
    }

    ret = mm_unmap(mm0, addr1 + PGSIZE / 2, PGSIZE / 4);
    assert(ret == 0 && mm0->map_count == 2);
    ptep = get_pte(pgdir, addr0, 0);
    assert(ptep != NULL && (*ptep & PTE_P));
    ptep = get_pte(pgdir, addr0 + PGSIZE, 0);
    assert(ptep != NULL && *ptep == 0);

    ret = mm_map(mm0, addr1, PGSIZE, vm_flags, NULL);
    memset((void *)addr1, 0x88, PGSIZE);
    assert(*(char *)addr1 == (char)0x88 && mm0->map_count == 3);

    for (i = 1; i < 16; i += 2) {
        ret = mm_unmap(mm0, addr0 + PGSIZE * i, PGSIZE);
        assert(ret == 0);
        if (i < 8) {
            ret = mm_map(mm0, addr0 + PGSIZE * i, PGSIZE, 0, NULL);
            assert(ret == 0);
        }
    }
    assert(mm0->map_count == 13);

    ret = mm_unmap(mm0, addr0 + PGSIZE * 2, PTSIZE - PGSIZE * 2);
    assert(ret == 0 && mm0->map_count == 2);

    ret = mm_unmap(mm0, addr0, PGSIZE * 2);
    assert(ret == 0 && mm0->map_count == 0);

    cprintf("check_mm_swap: step2, mm_unmap ok.\n");

    // step3: check exit_mmap

    ret = mm_map(mm0, addr0, PTSIZE, vm_flags, NULL);
    assert(ret == 0);

    for (i = 0, addr1 = addr0; i < 4; i ++, addr1 += PGSIZE) {
        *(char *)addr1 = (char)0xFF;
    }

    exit_mmap(mm0);
    for (i = 0; i < PDX(KERNBASE); i ++) {
        assert(pgdir[i] == 0);
    }

    cprintf("check_mm_swap: step3, exit_mmap ok.\n");

    // step4: check dup_mmap

    for (i = 0; i < max_swap_offset; i ++) {
        assert(mem_map[i] == SWAP_UNUSED);
    }

    ret = mm_map(mm0, addr0, PTSIZE, vm_flags, NULL);
    assert(ret != 0);

    addr1 = addr0;
    for (i = 0; i < 4; i ++, addr1 += PGSIZE) {
        *(char *)addr1 = (char)(i * i);
    }

    ret = 0;
    ret += swap_out_mm(mm0, 10);
    ret += swap_out_mm(mm0, 10);
    assert(ret == 4);

    for (; i < 8; i ++, addr1 += PGSIZE) {
        *(char *)addr1 = (char)(i * i);
    }

    // setup mm1

    mm1 = mm_create();
    assert(mm1 != NULL);

    page = alloc_page();
    assert(page != NULL);
    pgdir = page2kva(page);
    memcpy(pgdir, boot_pgdir, PGSIZE);
    pgdir[PDX(VPT)] = PADDR(pgdir) | PTE_P | PTE_W;
    mm1->pgdir = pgdir;

    ret = dup_mmap(mm1, mm0);
    assert(ret == 0);

    // switch to mm1

    check_mm_struct = mm1;
    lcr3(PADDR(mm1->pgdir));

    addr1 = addr0;
    for (i = 0; i < 8; i ++, addr1 += PGSIZE) {
        assert(*(char *)addr1 == (char)(i * i));
        *(char *)addr1 = (char)0x88;
    }

    // switch to mm0

    check_mm_struct = mm0;
    lcr3(PADDR(mm0->pgdir));

    addr1 = addr0;
    for (i = 0; i < 8; i ++, addr1 += PGSIZE) {
        assert(*(char *)addr1 == (char)(i * i));
    }

    // switch to boot_cr3

    check_mm_struct = NULL;
    lcr3(boot_cr3);

    // free memory

    exit_mmap(mm0);
    exit_mmap(mm1);

    free_page(kva2page(mm0->pgdir));
    mm_destroy(mm0);
    free_page(kva2page(mm1->pgdir));
    mm_destroy(mm1);

    cprintf("check_mm_swap: step4, dup_mmap ok.\n");

    refill_inactive_scan();
    page_launder();
    for (i = 0; i < max_swap_offset; i ++) {
        assert(mem_map[i] == SWAP_UNUSED);
    }

    assert(nr_free_pages_store == nr_free_pages());
    assert(slab_allocated_store == slab_allocated());

    cprintf("check_mm_swap() succeeded.\n");
}
Example #13
File: swap.c Project: jefjin/ucore
static void
check_mm_shm_swap(void) {
    size_t nr_free_pages_store = nr_free_pages();
    size_t slab_allocated_store = slab_allocated();

    int ret, i;
    for (i = 0; i < max_swap_offset; i ++) {
        assert(mem_map[i] == SWAP_UNUSED);
    }

    extern struct mm_struct *check_mm_struct;
    assert(check_mm_struct == NULL);

    struct mm_struct *mm0 = mm_create(), *mm1;
    assert(mm0 != NULL && list_empty(&(mm0->mmap_list)));

    struct Page *page = alloc_page();
    assert(page != NULL);
    pde_t *pgdir = page2kva(page);
    memcpy(pgdir, boot_pgdir, PGSIZE);
    pgdir[PDX(VPT)] = PADDR(pgdir) | PTE_P | PTE_W;

    mm0->pgdir = pgdir;
    check_mm_struct = mm0;
    lcr3(PADDR(mm0->pgdir));

    uint32_t vm_flags = VM_WRITE | VM_READ;

    uintptr_t addr0, addr1;

    addr0 = 0;
    do {
        if ((ret = mm_map(mm0, addr0, PTSIZE * 4, vm_flags, NULL)) == 0) {
            break;
        }
        addr0 += PTSIZE;
    } while (addr0 != 0);

    assert(ret == 0 && addr0 != 0 && mm0->map_count == 1);

    ret = mm_unmap(mm0, addr0, PTSIZE * 4);
    assert(ret == 0 && mm0->map_count == 0);

    struct shmem_struct *shmem = shmem_create(PTSIZE * 2);
    assert(shmem != NULL && shmem_ref(shmem) == 0);

    // step1: check share memory

    struct vma_struct *vma;

    addr1 = addr0 + PTSIZE * 2;
    ret = mm_map_shmem(mm0, addr0, vm_flags, shmem, &vma);
    assert(ret == 0);
    assert((vma->vm_flags & VM_SHARE) && vma->shmem == shmem && shmem_ref(shmem) == 1);
    ret = mm_map_shmem(mm0, addr1, vm_flags, shmem, &vma);
    assert(ret == 0);
    assert((vma->vm_flags & VM_SHARE) && vma->shmem == shmem && shmem_ref(shmem) == 2);

    // page fault

    for (i = 0; i < 4; i ++) {
        *(char *)(addr0 + i * PGSIZE) = (char)(i * i);
    }
    for (i = 0; i < 4; i ++) {
        assert(*(char *)(addr1 + i * PGSIZE) == (char)(i * i));
    }

    for (i = 0; i < 4; i ++) {
        *(char *)(addr1 + i * PGSIZE) = (char)(- i * i);
    }
    for (i = 0; i < 4; i ++) {
        assert(*(char *)(addr1 + i * PGSIZE) == (char)(- i * i));
    }

    // check swap

    ret = swap_out_mm(mm0, 8) + swap_out_mm(mm0, 8);
    assert(ret == 8 && nr_active_pages == 4 && nr_inactive_pages == 0);

    refill_inactive_scan();
    assert(nr_active_pages == 0 && nr_inactive_pages == 4);

    // write & read again

    memset((void *)addr0, 0x77, PGSIZE);
    for (i = 0; i < PGSIZE; i ++) {
        assert(*(char *)(addr1 + i) == (char)0x77);
    }

    // check unmap

    ret = mm_unmap(mm0, addr1, PGSIZE * 4);
    assert(ret == 0);

    addr0 += 4 * PGSIZE, addr1 += 4 * PGSIZE;
    *(char *)(addr0) = (char)(0xDC);
    assert(*(char *)(addr1) == (char)(0xDC));
    *(char *)(addr1 + PTSIZE) = (char)(0xDC);
    assert(*(char *)(addr0 + PTSIZE) == (char)(0xDC));

    cprintf("check_mm_shm_swap: step1, share memory ok.\n");

    // setup mm1

    mm1 = mm_create();
    assert(mm1 != NULL);


    page = alloc_page();
    assert(page != NULL);
    pgdir = page2kva(page);
    memcpy(pgdir, boot_pgdir, PGSIZE);
    pgdir[PDX(VPT)] = PADDR(pgdir) | PTE_P | PTE_W;
    mm1->pgdir = pgdir;


    ret = dup_mmap(mm1, mm0);
    assert(ret == 0 && shmem_ref(shmem) == 4);

    // switch to mm1

    check_mm_struct = mm1;
    lcr3(PADDR(mm1->pgdir));

    for (i = 0; i < 4; i ++) {
        *(char *)(addr0 + i * PGSIZE) = (char)(0x57 + i);
    }
    for (i = 0; i < 4; i ++) {
        assert(*(char *)(addr1 + i * PGSIZE) == (char)(0x57 + i));
    }

    check_mm_struct = mm0;
    lcr3(PADDR(mm0->pgdir));

    for (i = 0; i < 4; i ++) {
        assert(*(char *)(addr0 + i * PGSIZE) == (char)(0x57 + i));
        assert(*(char *)(addr1 + i * PGSIZE) == (char)(0x57 + i));
    }

    swap_out_mm(mm1, 4);
    exit_mmap(mm1);

    free_page(kva2page(mm1->pgdir));
    mm_destroy(mm1);

    assert(shmem_ref(shmem) == 2);

    cprintf("check_mm_shm_swap: step2, dup_mmap ok.\n");

    // free memory

    check_mm_struct = NULL;
    lcr3(boot_cr3);

    exit_mmap(mm0);
    free_page(kva2page(mm0->pgdir));
    mm_destroy(mm0);

    refill_inactive_scan();
    page_launder();
    for (i = 0; i < max_swap_offset; i ++) {
        assert(mem_map[i] == SWAP_UNUSED);
    }

    assert(nr_free_pages_store == nr_free_pages());
    assert(slab_allocated_store == slab_allocated());

    cprintf("check_mm_shm_swap() succeeded.\n");
}
Example #14
static int load_icode(int fd, int argc, char **kargv, int envc, char **kenvp)
{
	assert(argc >= 0 && argc <= EXEC_MAX_ARG_NUM);
	assert(envc >= 0 && envc <= EXEC_MAX_ENV_NUM);
	if (current->mm != NULL) {
		panic("load_icode: current->mm must be empty.\n");
	}

	int ret = -E_NO_MEM;

//#ifdef UCONFIG_BIONIC_LIBC
	uint32_t real_entry;
//#endif //UCONFIG_BIONIC_LIBC

	struct mm_struct *mm;
	if ((mm = mm_create()) == NULL) {
		goto bad_mm;
	}

	if (setup_pgdir(mm) != 0) {
		goto bad_pgdir_cleanup_mm;
	}

	mm->brk_start = 0;

	struct Page *page;

	struct elfhdr __elf, *elf = &__elf;
	if ((ret = load_icode_read(fd, elf, sizeof(struct elfhdr), 0)) != 0) {
		goto bad_elf_cleanup_pgdir;
	}

	if (elf->e_magic != ELF_MAGIC) {
		ret = -E_INVAL_ELF;
		goto bad_elf_cleanup_pgdir;
	}
//#ifdef UCONFIG_BIONIC_LIBC
	real_entry = elf->e_entry;

	uint32_t load_address, load_address_flag = 0;
//#endif //UCONFIG_BIONIC_LIBC

	struct proghdr __ph, *ph = &__ph;
	uint32_t vm_flags, phnum;
	pte_perm_t perm = 0;

//#ifdef UCONFIG_BIONIC_LIBC
	uint32_t is_dynamic = 0, interp_idx;
	uint32_t bias = 0;
//#endif //UCONFIG_BIONIC_LIBC
	for (phnum = 0; phnum < elf->e_phnum; phnum++) {
		off_t phoff = elf->e_phoff + sizeof(struct proghdr) * phnum;
		if ((ret =
		     load_icode_read(fd, ph, sizeof(struct proghdr),
				     phoff)) != 0) {
			goto bad_cleanup_mmap;
		}

		if (ph->p_type == ELF_PT_INTERP) {
			is_dynamic = 1;
			interp_idx = phnum;
			continue;
		}

		if (ph->p_type != ELF_PT_LOAD) {
			continue;
		}
		if (ph->p_filesz > ph->p_memsz) {
			ret = -E_INVAL_ELF;
			goto bad_cleanup_mmap;
		}

		if (ph->p_va == 0 && !bias) {
			bias = 0x00008000;
		}

		if ((ret = map_ph(fd, ph, mm, &bias, 0)) != 0) {
			kprintf("load address: 0x%08x size: %d\n", ph->p_va,
				ph->p_memsz);
			goto bad_cleanup_mmap;
		}

		if (load_address_flag == 0)
			load_address = ph->p_va + bias;
		++load_address_flag;

	  /*********************************************/
		/*
		   vm_flags = 0;
		   ptep_set_u_read(&perm);
		   if (ph->p_flags & ELF_PF_X) vm_flags |= VM_EXEC;
		   if (ph->p_flags & ELF_PF_W) vm_flags |= VM_WRITE;
		   if (ph->p_flags & ELF_PF_R) vm_flags |= VM_READ;
		   if (vm_flags & VM_WRITE) ptep_set_u_write(&perm);

		   if ((ret = mm_map(mm, ph->p_va, ph->p_memsz, vm_flags, NULL)) != 0) {
		   goto bad_cleanup_mmap;
		   }

		   if (mm->brk_start < ph->p_va + ph->p_memsz) {
		   mm->brk_start = ph->p_va + ph->p_memsz;
		   }

		   off_t offset = ph->p_offset;
		   size_t off, size;
		   uintptr_t start = ph->p_va, end, la = ROUNDDOWN(start, PGSIZE);

		   end = ph->p_va + ph->p_filesz;
		   while (start < end) {
		   if ((page = pgdir_alloc_page(mm->pgdir, la, perm)) == NULL) {
		   ret = -E_NO_MEM;
		   goto bad_cleanup_mmap;
		   }
		   off = start - la, size = PGSIZE - off, la += PGSIZE;
		   if (end < la) {
		   size -= la - end;
		   }
		   if ((ret = load_icode_read(fd, page2kva(page) + off, size, offset)) != 0) {
		   goto bad_cleanup_mmap;
		   }
		   start += size, offset += size;
		   }

		   end = ph->p_va + ph->p_memsz;

		   if (start < la) {
		   // ph->p_memsz == ph->p_filesz 
		   if (start == end) {
		   continue ;
		   }
		   off = start + PGSIZE - la, size = PGSIZE - off;
		   if (end < la) {
		   size -= la - end;
		   }
		   memset(page2kva(page) + off, 0, size);
		   start += size;
		   assert((end < la && start == end) || (end >= la && start == la));
		   }

		   while (start < end) {
		   if ((page = pgdir_alloc_page(mm->pgdir, la, perm)) == NULL) {
		   ret = -E_NO_MEM;
		   goto bad_cleanup_mmap;
		   }
		   off = start - la, size = PGSIZE - off, la += PGSIZE;
		   if (end < la) {
		   size -= la - end;
		   }
		   memset(page2kva(page) + off, 0, size);
		   start += size;
		   }
		 */
	  /**************************************/
	}

	mm->brk_start = mm->brk = ROUNDUP(mm->brk_start, PGSIZE);

	/* setup user stack */
	vm_flags = VM_READ | VM_WRITE | VM_STACK;
	if ((ret =
	     mm_map(mm, USTACKTOP - USTACKSIZE, USTACKSIZE, vm_flags,
		    NULL)) != 0) {
		goto bad_cleanup_mmap;
	}

	if (is_dynamic) {
		elf->e_entry += bias;

		bias = 0;

		off_t phoff =
		    elf->e_phoff + sizeof(struct proghdr) * interp_idx;
		if ((ret =
		     load_icode_read(fd, ph, sizeof(struct proghdr),
				     phoff)) != 0) {
			goto bad_cleanup_mmap;
		}

		char *interp_path = (char *)kmalloc(ph->p_filesz);
		load_icode_read(fd, interp_path, ph->p_filesz, ph->p_offset);

		int interp_fd = sysfile_open(interp_path, O_RDONLY);
		assert(interp_fd >= 0);
		struct elfhdr interp___elf, *interp_elf = &interp___elf;
		assert((ret =
			load_icode_read(interp_fd, interp_elf,
					sizeof(struct elfhdr), 0)) == 0);
		assert(interp_elf->e_magic == ELF_MAGIC);

		struct proghdr interp___ph, *interp_ph = &interp___ph;
		uint32_t interp_phnum;
		uint32_t va_min = 0xffffffff, va_max = 0;
		for (interp_phnum = 0; interp_phnum < interp_elf->e_phnum;
		     ++interp_phnum) {
			off_t interp_phoff =
			    interp_elf->e_phoff +
			    sizeof(struct proghdr) * interp_phnum;
			assert((ret =
				load_icode_read(interp_fd, interp_ph,
						sizeof(struct proghdr),
						interp_phoff)) == 0);
			if (interp_ph->p_type != ELF_PT_LOAD) {
				continue;
			}
			if (va_min > interp_ph->p_va)
				va_min = interp_ph->p_va;
			if (va_max < interp_ph->p_va + interp_ph->p_memsz)
				va_max = interp_ph->p_va + interp_ph->p_memsz;
		}

		bias = get_unmapped_area(mm, va_max - va_min + 1 + PGSIZE);
		bias = ROUNDUP(bias, PGSIZE);

		for (interp_phnum = 0; interp_phnum < interp_elf->e_phnum;
		     ++interp_phnum) {
			off_t interp_phoff =
			    interp_elf->e_phoff +
			    sizeof(struct proghdr) * interp_phnum;
			assert((ret =
				load_icode_read(interp_fd, interp_ph,
						sizeof(struct proghdr),
						interp_phoff)) == 0);
			if (interp_ph->p_type != ELF_PT_LOAD) {
				continue;
			}
			assert((ret =
				map_ph(interp_fd, interp_ph, mm, &bias,
				       1)) == 0);
		}

		real_entry = interp_elf->e_entry + bias;

		sysfile_close(interp_fd);
		kfree(interp_path);
	}

	sysfile_close(fd);

	bool intr_flag;
	local_intr_save(intr_flag);
	{
		list_add(&(proc_mm_list), &(mm->proc_mm_link));
	}
	local_intr_restore(intr_flag);
	mm_count_inc(mm);
	current->mm = mm;
	set_pgdir(current, mm->pgdir);
	mm->cpuid = myid();
	mp_set_mm_pagetable(mm);

	if (!is_dynamic) {
		real_entry += bias;
	}
#ifdef UCONFIG_BIONIC_LIBC
	if (init_new_context_dynamic(current, elf, argc, kargv, envc, kenvp,
				     is_dynamic, real_entry, load_address,
				     bias) < 0)
		goto bad_cleanup_mmap;
#else
	if (init_new_context(current, elf, argc, kargv, envc, kenvp) < 0)
		goto bad_cleanup_mmap;
#endif //UCONFIG_BIONIC_LIBC
	ret = 0;
out:
	return ret;
bad_cleanup_mmap:
	exit_mmap(mm);
bad_elf_cleanup_pgdir:
	put_pgdir(mm);
bad_pgdir_cleanup_mm:
	mm_destroy(mm);
bad_mm:
	goto out;
}
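The interpreter handling above relocates the dynamic linker wholesale: its PT_LOAD segments span va_min..va_max, a hole of at least that size is reserved with get_unmapped_area(), and the chosen bias is added to every segment address and to the interpreter's entry point. Restated as an assumed helper (names taken from the example above):

/* Assumed illustration of the interpreter-bias computation used above. */
static uintptr_t pick_interp_bias(struct mm_struct *mm,
				  uintptr_t va_min, uintptr_t va_max)
{
	uintptr_t bias = get_unmapped_area(mm, va_max - va_min + 1 + PGSIZE);
	return ROUNDUP(bias, PGSIZE);	/* every interpreter p_va gets this added */
}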