Example #1
File: swap.c, Project: spinlock/ucore
static void
check_mm_shm_swap(void) {
    size_t nr_free_pages_store = nr_free_pages();
    size_t slab_allocated_store = slab_allocated();

    int ret, i;
    for (i = 0; i < max_swap_offset; i ++) {
        assert(mem_map[i] == SWAP_UNUSED);
    }

    extern struct mm_struct *check_mm_struct;
    assert(check_mm_struct == NULL);

    struct mm_struct *mm0 = mm_create(), *mm1;
    assert(mm0 != NULL && list_empty(&(mm0->mmap_list)));

    struct Page *page = alloc_page();
    assert(page != NULL);
    pgd_t *pgdir = page2kva(page);
    memcpy(pgdir, boot_pgdir, PGSIZE);
    pgdir[PGX(VPT)] = PADDR(pgdir) | PTE_P | PTE_W;

    mm0->pgdir = pgdir;
    check_mm_struct = mm0;
    lcr3(PADDR(mm0->pgdir));

    uint32_t vm_flags = VM_WRITE | VM_READ;

    uintptr_t addr0, addr1;

    addr0 = 0;
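    // probe for the lowest user address where a PTSIZE * 4 mapping succeeds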
    do {
        if ((ret = mm_map(mm0, addr0, PTSIZE * 4, vm_flags, NULL)) == 0) {
            break;
        }
        addr0 += PTSIZE;
    } while (addr0 != 0);

    assert(ret == 0 && addr0 != 0 && mm0->map_count == 1);

    ret = mm_unmap(mm0, addr0, PTSIZE * 4);
    assert(ret == 0 && mm0->map_count == 0);

    struct shmem_struct *shmem = shmem_create(PTSIZE * 2);
    assert(shmem != NULL && shmem_ref(shmem) == 0);

    // step1: check share memory

    struct vma_struct *vma;

    addr1 = addr0 + PTSIZE * 2;
    ret = mm_map_shmem(mm0, addr0, vm_flags, shmem, &vma);
    assert(ret == 0);
    assert((vma->vm_flags & VM_SHARE) && vma->shmem == shmem && shmem_ref(shmem) == 1);
    ret = mm_map_shmem(mm0, addr1, vm_flags, shmem, &vma);
    assert(ret == 0);
    assert((vma->vm_flags & VM_SHARE) && vma->shmem == shmem && shmem_ref(shmem) == 2);

    // page fault

    for (i = 0; i < 4; i ++) {
        *(char *)(addr0 + i * PGSIZE) = (char)(i * i);
    }
    for (i = 0; i < 4; i ++) {
        assert(*(char *)(addr1 + i * PGSIZE) == (char)(i * i));
    }

    for (i = 0; i < 4; i ++) {
        *(char *)(addr1 + i * PGSIZE) = (char)(- i * i);
    }
    for (i = 0; i < 4; i ++) {
        assert(*(char *)(addr1 + i * PGSIZE) == (char)(- i * i));
    }

    // check swap

    ret = swap_out_mm(mm0, 8) + swap_out_mm(mm0, 8);
    assert(ret == 8 && nr_active_pages == 4 && nr_inactive_pages == 0);

    refill_inactive_scan();
    assert(nr_active_pages == 0 && nr_inactive_pages == 4);

    // write & read again

    memset((void *)addr0, 0x77, PGSIZE);
    for (i = 0; i < PGSIZE; i ++) {
        assert(*(char *)(addr1 + i) == (char)0x77);
    }

    // check unmap

    ret = mm_unmap(mm0, addr1, PGSIZE * 4);
    assert(ret == 0);

    addr0 += 4 * PGSIZE, addr1 += 4 * PGSIZE;
    *(char *)(addr0) = (char)(0xDC);
    assert(*(char *)(addr1) == (char)(0xDC));
    *(char *)(addr1 + PTSIZE) = (char)(0xDC);
    assert(*(char *)(addr0 + PTSIZE) == (char)(0xDC));

    cprintf("check_mm_shm_swap: step1, share memory ok.\n");

    // setup mm1

    mm1 = mm_create();
    assert(mm1 != NULL);


    page = alloc_page();
    assert(page != NULL);
    pgdir = page2kva(page);
    memcpy(pgdir, boot_pgdir, PGSIZE);
    pgdir[PGX(VPT)] = PADDR(pgdir) | PTE_P | PTE_W;
    mm1->pgdir = pgdir;


    ret = dup_mmap(mm1, mm0);
    assert(ret == 0 && shmem_ref(shmem) == 4);

    // switch to mm1

    check_mm_struct = mm1;
    lcr3(PADDR(mm1->pgdir));

    for (i = 0; i < 4; i ++) {
        *(char *)(addr0 + i * PGSIZE) = (char)(0x57 + i);
    }
    for (i = 0; i < 4; i ++) {
        assert(*(char *)(addr1 + i * PGSIZE) == (char)(0x57 + i));
    }

    check_mm_struct = mm0;
    lcr3(PADDR(mm0->pgdir));

    for (i = 0; i < 4; i ++) {
        assert(*(char *)(addr0 + i * PGSIZE) == (char)(0x57 + i));
        assert(*(char *)(addr1 + i * PGSIZE) == (char)(0x57 + i));
    }

    swap_out_mm(mm1, 4);
    exit_mmap(mm1);

    free_page(kva2page(mm1->pgdir));
    mm_destroy(mm1);

    assert(shmem_ref(shmem) == 2);

    cprintf("check_mm_shm_swap: step2, dup_mmap ok.\n");

    // free memory

    check_mm_struct = NULL;
    lcr3(boot_cr3);

    exit_mmap(mm0);
    free_page(kva2page(mm0->pgdir));
    mm_destroy(mm0);

    refill_inactive_scan();
    page_launder();
    for (i = 0; i < max_swap_offset; i ++) {
        assert(mem_map[i] == SWAP_UNUSED);
    }

    assert(nr_free_pages_store == nr_free_pages());
    assert(slab_allocated_store == slab_allocated());

    cprintf("check_mm_shm_swap() succeeded.\n");
}
Example #2
File: swap.c, Project: Aresthu/ucore_plus
void check_mm_swap(void)
{
	size_t nr_free_pages_store = nr_free_pages();
	size_t slab_allocated_store = slab_allocated();

	int ret, i, j;
	for (i = 0; i < max_swap_offset; i++) {
		assert(mem_map[i] == SWAP_UNUSED);
	}

	extern struct mm_struct *check_mm_struct;
	assert(check_mm_struct == NULL);

	// step1: check mm_map

	struct mm_struct *mm0 = mm_create(), *mm1;
	assert(mm0 != NULL && list_empty(&(mm0->mmap_list)));

	uintptr_t addr0, addr1;

	addr0 = 0;
	do {
		ret = mm_map(mm0, addr0, PTSIZE, 0, NULL);
		assert(ret ==
		       ((USER_ACCESS(addr0, addr0 + PTSIZE)) ? 0 : -E_INVAL));
		addr0 += PTSIZE;
	} while (addr0 != 0);

	addr0 = 0;
	for (i = 0; i < 1024; i++, addr0 += PTSIZE) {
		ret = mm_map(mm0, addr0, PGSIZE, 0, NULL);
		assert(ret == -E_INVAL);
	}

	mm_destroy(mm0);

	mm0 = mm_create();
	assert(mm0 != NULL && list_empty(&(mm0->mmap_list)));

	addr0 = 0, i = 0;
	do {
		ret = mm_map(mm0, addr0, PTSIZE - PGSIZE, 0, NULL);
		assert(ret ==
		       ((USER_ACCESS(addr0, addr0 + PTSIZE)) ? 0 : -E_INVAL));
		if (ret == 0) {
			i++;
		}
		addr0 += PTSIZE;
	} while (addr0 != 0);

	addr0 = 0, j = 0;
	do {
		addr0 += PTSIZE - PGSIZE;
		ret = mm_map(mm0, addr0, PGSIZE, 0, NULL);
		assert(ret ==
		       ((USER_ACCESS(addr0, addr0 + PGSIZE)) ? 0 : -E_INVAL));
		if (ret == 0) {
			j++;
		}
		addr0 += PGSIZE;
	} while (addr0 != 0);

	assert(j + 1 >= i);

	mm_destroy(mm0);

	assert(nr_free_pages_store == nr_free_pages());
	assert(slab_allocated_store == slab_allocated());

	kprintf("check_mm_swap: step1, mm_map ok.\n");

	// step2: check page fault

	mm0 = mm_create();
	assert(mm0 != NULL && list_empty(&(mm0->mmap_list)));

	// setup page table

	struct Page *page = alloc_page();
	assert(page != NULL);
	pde_t *pgdir = page2kva(page);
	memcpy(pgdir, boot_pgdir, PGSIZE);
	pgdir[PDX(VPT)] = PADDR(pgdir) | PTE_P | PTE_W;

	// prepare for page fault

	mm0->pgdir = pgdir;
	check_mm_struct = mm0;
	lcr3(PADDR(mm0->pgdir));

	uint32_t vm_flags = VM_WRITE | VM_READ;
	struct vma_struct *vma;

	addr0 = 0;
	do {
		if ((ret = mm_map(mm0, addr0, PTSIZE, vm_flags, &vma)) == 0) {
			break;
		}
		addr0 += PTSIZE;
	} while (addr0 != 0);

	assert(ret == 0 && addr0 != 0 && mm0->map_count == 1);
	assert(vma->vm_start == addr0 && vma->vm_end == addr0 + PTSIZE);

	// check pte entry

	pte_t *ptep;
	for (addr1 = addr0; addr1 < addr0 + PTSIZE; addr1 += PGSIZE) {
		ptep = get_pte(pgdir, addr1, 0);
		assert(ptep == NULL);
	}

	memset((void *)addr0, 0xEF, PGSIZE * 2);
	ptep = get_pte(pgdir, addr0, 0);
	assert(ptep != NULL && (*ptep & PTE_P));
	ptep = get_pte(pgdir, addr0 + PGSIZE, 0);
	assert(ptep != NULL && (*ptep & PTE_P));

	ret = mm_unmap(mm0, -PTSIZE, PTSIZE);
	assert(ret == -E_INVAL);
	ret = mm_unmap(mm0, addr0 + PTSIZE, PGSIZE);
	assert(ret == 0);

	addr1 = addr0 + PTSIZE / 2;
	ret = mm_unmap(mm0, addr1, PGSIZE);
	assert(ret == 0 && mm0->map_count == 2);

	ret = mm_unmap(mm0, addr1 + 2 * PGSIZE, PGSIZE * 4);
	assert(ret == 0 && mm0->map_count == 3);

	ret = mm_map(mm0, addr1, PGSIZE * 6, 0, NULL);
	assert(ret == -E_INVAL);
	ret = mm_map(mm0, addr1, PGSIZE, 0, NULL);
	assert(ret == 0 && mm0->map_count == 4);
	ret = mm_map(mm0, addr1 + 2 * PGSIZE, PGSIZE * 4, 0, NULL);
	assert(ret == 0 && mm0->map_count == 5);

	ret = mm_unmap(mm0, addr1 + PGSIZE / 2, PTSIZE / 2 - PGSIZE);
	assert(ret == 0 && mm0->map_count == 1);

	addr1 = addr0 + PGSIZE;
	for (i = 0; i < PGSIZE; i++) {
		assert(*(char *)(addr1 + i) == (char)0xEF);
	}

	ret = mm_unmap(mm0, addr1 + PGSIZE / 2, PGSIZE / 4);
	assert(ret == 0 && mm0->map_count == 2);
	ptep = get_pte(pgdir, addr0, 0);
	assert(ptep != NULL && (*ptep & PTE_P));
	ptep = get_pte(pgdir, addr0 + PGSIZE, 0);
	assert(ptep != NULL && *ptep == 0);

	ret = mm_map(mm0, addr1, PGSIZE, vm_flags, NULL);
	memset((void *)addr1, 0x88, PGSIZE);
	assert(*(char *)addr1 == (char)0x88 && mm0->map_count == 3);

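	// unmap every other page and re-map the first few of them, fragmenting the vma list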
	for (i = 1; i < 16; i += 2) {
		ret = mm_unmap(mm0, addr0 + PGSIZE * i, PGSIZE);
		assert(ret == 0);
		if (i < 8) {
			ret = mm_map(mm0, addr0 + PGSIZE * i, PGSIZE, 0, NULL);
			assert(ret == 0);
		}
	}
	assert(mm0->map_count == 13);

	ret = mm_unmap(mm0, addr0 + PGSIZE * 2, PTSIZE - PGSIZE * 2);
	assert(ret == 0 && mm0->map_count == 2);

	ret = mm_unmap(mm0, addr0, PGSIZE * 2);
	assert(ret == 0 && mm0->map_count == 0);

	kprintf("check_mm_swap: step2, mm_unmap ok.\n");

	// step3: check exit_mmap

	ret = mm_map(mm0, addr0, PTSIZE, vm_flags, NULL);
	assert(ret == 0);

	for (i = 0, addr1 = addr0; i < 4; i++, addr1 += PGSIZE) {
		*(char *)addr1 = (char)0xFF;
	}

	exit_mmap(mm0);
	for (i = 0; i < PDX(KERNBASE); i++) {
		assert(pgdir[i] == 0);
	}

	kprintf("check_mm_swap: step3, exit_mmap ok.\n");

	// step4: check dup_mmap

	for (i = 0; i < max_swap_offset; i++) {
		assert(mem_map[i] == SWAP_UNUSED);
	}

	ret = mm_map(mm0, addr0, PTSIZE, vm_flags, NULL);
	assert(ret != 0);

	addr1 = addr0;
	for (i = 0; i < 4; i++, addr1 += PGSIZE) {
		*(char *)addr1 = (char)(i * i);
	}

	ret = 0;
	ret += swap_out_mm(mm0, 10);
	ret += swap_out_mm(mm0, 10);
	assert(ret == 4);

	for (; i < 8; i++, addr1 += PGSIZE) {
		*(char *)addr1 = (char)(i * i);
	}

	// setup mm1

	mm1 = mm_create();
	assert(mm1 != NULL);

	page = alloc_page();
	assert(page != NULL);
	pgdir = page2kva(page);
	memcpy(pgdir, boot_pgdir, PGSIZE);
	pgdir[PDX(VPT)] = PADDR(pgdir) | PTE_P | PTE_W;
	mm1->pgdir = pgdir;

	ret = dup_mmap(mm1, mm0);
	assert(ret == 0);

	// switch to mm1

	check_mm_struct = mm1;
	lcr3(PADDR(mm1->pgdir));

	addr1 = addr0;
	for (i = 0; i < 8; i++, addr1 += PGSIZE) {
		assert(*(char *)addr1 == (char)(i * i));
		*(char *)addr1 = (char)0x88;
	}

	// switch to mm0

	check_mm_struct = mm0;
	lcr3(PADDR(mm0->pgdir));

	addr1 = addr0;
	for (i = 0; i < 8; i++, addr1 += PGSIZE) {
		assert(*(char *)addr1 == (char)(i * i));
	}

	// switch to boot_cr3

	check_mm_struct = NULL;
	lcr3(boot_cr3);

	// free memory

	exit_mmap(mm0);
	exit_mmap(mm1);

	free_page(kva2page(mm0->pgdir));
	mm_destroy(mm0);
	free_page(kva2page(mm1->pgdir));
	mm_destroy(mm1);

	kprintf("check_mm_swap: step4, dup_mmap ok.\n");

	refill_inactive_scan();
	page_launder();
	for (i = 0; i < max_swap_offset; i++) {
		assert(mem_map[i] == SWAP_UNUSED);
	}

	assert(nr_free_pages_store == nr_free_pages());
	assert(slab_allocated_store == slab_allocated());

	kprintf("check_mm_swap() succeeded.\n");
}
Example #3
File: file.c, Project: tansinan/ucore_plus
void *linux_regfile_mmap2(void *addr, size_t len, int prot, int flags, int fd,
			  size_t off)
{
	int subret = -E_INVAL;
	struct mm_struct *mm = current->mm;
	assert(mm != NULL);
	if (len == 0) {
		return (void *)-1;
	}
	lock_mm(mm);

	uintptr_t start = ROUNDDOWN((uintptr_t)addr, PGSIZE);
	len = ROUNDUP(len, PGSIZE);

	uint32_t vm_flags = VM_READ;
	if (prot & PROT_WRITE) {
		vm_flags |= VM_WRITE;
	}
	if (prot & PROT_EXEC) {
		vm_flags |= VM_EXEC;
	}
	if (flags & MAP_STACK) {
		vm_flags |= VM_STACK;
	}
	if (flags & MAP_ANONYMOUS) {
		vm_flags |= VM_ANONYMOUS;
	}

	subret = -E_NO_MEM;
	if (start == 0 && (start = get_unmapped_area(mm, len)) == 0) {
		goto out_unlock;
	}
	uintptr_t end = start + len;
	//kprintf("start = %llx, end = %llx, len = %llx", start, end, len);
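	// decide how [start, end) interacts with existing mappings: reuse an exact anonymous match, pick a fresh area unless MAP_FIXED, or carve the fixed range out of an overlapping anonymous vma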
	struct vma_struct *vma = find_vma(mm, start);
	if (vma == NULL || vma->vm_start >= end) {
		vma = NULL;
	} else if (!(flags & MAP_FIXED)) {
		start = get_unmapped_area(mm, len);
		vma = NULL;
	} else if (!(vma->vm_flags & VM_ANONYMOUS)) {
		goto out_unlock;
	} else if (vma->vm_start == start && end == vma->vm_end) {
		vma->vm_flags = vm_flags;
	} else {
		if (vma->vm_start > start || end > vma->vm_end) {
			goto out_unlock;
		}
		if ((subret = mm_unmap_keep_pages(mm, start, len)) != 0) {
			goto out_unlock;
		}
		vma = NULL;
	}
	if (vma == NULL
	    && (subret = mm_map(mm, start, len, vm_flags, &vma)) != 0) {
		goto out_unlock;
	}
	if (!(flags & MAP_ANONYMOUS)) {
		struct file_desc_table *desc_table = fs_get_desc_table(current->fs_struct);
		struct file *file = file_desc_table_get_file(desc_table, fd);
		vma_mapfile(vma, file, off, current->fs_struct);
		//vma_mapfile(vma, fd, off << 12, NULL);
	}
	subret = 0;
out_unlock:
	unlock_mm(mm);
	return subret == 0 ? (void *)start : (void *)-1;
}
Example #4
File: proc.c, Project: liangchao1992/ucore
static int load_icode(int fd, int argc, char **kargv, int envc, char **kenvp)
{
	assert(argc >= 0 && argc <= EXEC_MAX_ARG_NUM);
	assert(envc >= 0 && envc <= EXEC_MAX_ENV_NUM);
	if (current->mm != NULL) {
		panic("load_icode: current->mm must be empty.\n");
	}

	int ret = -E_NO_MEM;

//#ifdef UCONFIG_BIONIC_LIBC
	uint32_t real_entry;
//#endif //UCONFIG_BIONIC_LIBC

	struct mm_struct *mm;
	if ((mm = mm_create()) == NULL) {
		goto bad_mm;
	}

	if (setup_pgdir(mm) != 0) {
		goto bad_pgdir_cleanup_mm;
	}

	mm->brk_start = 0;

	struct Page *page;

	struct elfhdr __elf, *elf = &__elf;
	if ((ret = load_icode_read(fd, elf, sizeof(struct elfhdr), 0)) != 0) {
		goto bad_elf_cleanup_pgdir;
	}

	if (elf->e_magic != ELF_MAGIC) {
		ret = -E_INVAL_ELF;
		goto bad_elf_cleanup_pgdir;
	}
//#ifdef UCONFIG_BIONIC_LIBC
	real_entry = elf->e_entry;

	uint32_t load_address, load_address_flag = 0;
//#endif //UCONFIG_BIONIC_LIBC

	struct proghdr __ph, *ph = &__ph;
	uint32_t vm_flags, phnum;
	pte_perm_t perm = 0;

//#ifdef UCONFIG_BIONIC_LIBC
	uint32_t is_dynamic = 0, interp_idx;
	uint32_t bias = 0;
//#endif //UCONFIG_BIONIC_LIBC
	for (phnum = 0; phnum < elf->e_phnum; phnum++) {
		off_t phoff = elf->e_phoff + sizeof(struct proghdr) * phnum;
		if ((ret =
		     load_icode_read(fd, ph, sizeof(struct proghdr),
				     phoff)) != 0) {
			goto bad_cleanup_mmap;
		}

		if (ph->p_type == ELF_PT_INTERP) {
			is_dynamic = 1;
			interp_idx = phnum;
			continue;
		}

		if (ph->p_type != ELF_PT_LOAD) {
			continue;
		}
		if (ph->p_filesz > ph->p_memsz) {
			ret = -E_INVAL_ELF;
			goto bad_cleanup_mmap;
		}

		if (ph->p_va == 0 && !bias) {
			bias = 0x00008000;
		}

		if ((ret = map_ph(fd, ph, mm, &bias, 0)) != 0) {
			kprintf("load address: 0x%08x size: %d\n", ph->p_va,
				ph->p_memsz);
			goto bad_cleanup_mmap;
		}

		if (load_address_flag == 0)
			load_address = ph->p_va + bias;
		++load_address_flag;

	  /*********************************************/
		/*
		   vm_flags = 0;
		   ptep_set_u_read(&perm);
		   if (ph->p_flags & ELF_PF_X) vm_flags |= VM_EXEC;
		   if (ph->p_flags & ELF_PF_W) vm_flags |= VM_WRITE;
		   if (ph->p_flags & ELF_PF_R) vm_flags |= VM_READ;
		   if (vm_flags & VM_WRITE) ptep_set_u_write(&perm);

		   if ((ret = mm_map(mm, ph->p_va, ph->p_memsz, vm_flags, NULL)) != 0) {
		   goto bad_cleanup_mmap;
		   }

		   if (mm->brk_start < ph->p_va + ph->p_memsz) {
		   mm->brk_start = ph->p_va + ph->p_memsz;
		   }

		   off_t offset = ph->p_offset;
		   size_t off, size;
		   uintptr_t start = ph->p_va, end, la = ROUNDDOWN(start, PGSIZE);

		   end = ph->p_va + ph->p_filesz;
		   while (start < end) {
		   if ((page = pgdir_alloc_page(mm->pgdir, la, perm)) == NULL) {
		   ret = -E_NO_MEM;
		   goto bad_cleanup_mmap;
		   }
		   off = start - la, size = PGSIZE - off, la += PGSIZE;
		   if (end < la) {
		   size -= la - end;
		   }
		   if ((ret = load_icode_read(fd, page2kva(page) + off, size, offset)) != 0) {
		   goto bad_cleanup_mmap;
		   }
		   start += size, offset += size;
		   }

		   end = ph->p_va + ph->p_memsz;

		   if (start < la) {
		   // ph->p_memsz == ph->p_filesz 
		   if (start == end) {
		   continue ;
		   }
		   off = start + PGSIZE - la, size = PGSIZE - off;
		   if (end < la) {
		   size -= la - end;
		   }
		   memset(page2kva(page) + off, 0, size);
		   start += size;
		   assert((end < la && start == end) || (end >= la && start == la));
		   }

		   while (start < end) {
		   if ((page = pgdir_alloc_page(mm->pgdir, la, perm)) == NULL) {
		   ret = -E_NO_MEM;
		   goto bad_cleanup_mmap;
		   }
		   off = start - la, size = PGSIZE - off, la += PGSIZE;
		   if (end < la) {
		   size -= la - end;
		   }
		   memset(page2kva(page) + off, 0, size);
		   start += size;
		   }
		 */
	  /**************************************/
	}

	mm->brk_start = mm->brk = ROUNDUP(mm->brk_start, PGSIZE);

	/* setup user stack */
	vm_flags = VM_READ | VM_WRITE | VM_STACK;
	if ((ret =
	     mm_map(mm, USTACKTOP - USTACKSIZE, USTACKSIZE, vm_flags,
		    NULL)) != 0) {
		goto bad_cleanup_mmap;
	}

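	/* dynamically linked binary: map the interpreter named by PT_INTERP and enter the program through it */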
	if (is_dynamic) {
		elf->e_entry += bias;

		bias = 0;

		off_t phoff =
		    elf->e_phoff + sizeof(struct proghdr) * interp_idx;
		if ((ret =
		     load_icode_read(fd, ph, sizeof(struct proghdr),
				     phoff)) != 0) {
			goto bad_cleanup_mmap;
		}

		char *interp_path = (char *)kmalloc(ph->p_filesz);
		load_icode_read(fd, interp_path, ph->p_filesz, ph->p_offset);

		int interp_fd = sysfile_open(interp_path, O_RDONLY);
		assert(interp_fd >= 0);
		struct elfhdr interp___elf, *interp_elf = &interp___elf;
		assert((ret =
			load_icode_read(interp_fd, interp_elf,
					sizeof(struct elfhdr), 0)) == 0);
		assert(interp_elf->e_magic == ELF_MAGIC);

		struct proghdr interp___ph, *interp_ph = &interp___ph;
		uint32_t interp_phnum;
		uint32_t va_min = 0xffffffff, va_max = 0;
		for (interp_phnum = 0; interp_phnum < interp_elf->e_phnum;
		     ++interp_phnum) {
			off_t interp_phoff =
			    interp_elf->e_phoff +
			    sizeof(struct proghdr) * interp_phnum;
			assert((ret =
				load_icode_read(interp_fd, interp_ph,
						sizeof(struct proghdr),
						interp_phoff)) == 0);
			if (interp_ph->p_type != ELF_PT_LOAD) {
				continue;
			}
			if (va_min > interp_ph->p_va)
				va_min = interp_ph->p_va;
			if (va_max < interp_ph->p_va + interp_ph->p_memsz)
				va_max = interp_ph->p_va + interp_ph->p_memsz;
		}

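		/* reserve a free region large enough to hold all of the interpreter's PT_LOAD segments */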
		bias = get_unmapped_area(mm, va_max - va_min + 1 + PGSIZE);
		bias = ROUNDUP(bias, PGSIZE);

		for (interp_phnum = 0; interp_phnum < interp_elf->e_phnum;
		     ++interp_phnum) {
			off_t interp_phoff =
			    interp_elf->e_phoff +
			    sizeof(struct proghdr) * interp_phnum;
			assert((ret =
				load_icode_read(interp_fd, interp_ph,
						sizeof(struct proghdr),
						interp_phoff)) == 0);
			if (interp_ph->p_type != ELF_PT_LOAD) {
				continue;
			}
			assert((ret =
				map_ph(interp_fd, interp_ph, mm, &bias,
				       1)) == 0);
		}

		real_entry = interp_elf->e_entry + bias;

		sysfile_close(interp_fd);
		kfree(interp_path);
	}

	sysfile_close(fd);

	bool intr_flag;
	local_intr_save(intr_flag);
	{
		list_add(&(proc_mm_list), &(mm->proc_mm_link));
	}
	local_intr_restore(intr_flag);
	mm_count_inc(mm);
	current->mm = mm;
	set_pgdir(current, mm->pgdir);
	mp_set_mm_pagetable(mm);

	if (!is_dynamic) {
		real_entry += bias;
	}
#ifdef UCONFIG_BIONIC_LIBC
	if (init_new_context_dynamic(current, elf, argc, kargv, envc, kenvp,
				     is_dynamic, real_entry, load_address,
				     bias) < 0)
		goto bad_cleanup_mmap;
#else
	if (init_new_context(current, elf, argc, kargv, envc, kenvp) < 0)
		goto bad_cleanup_mmap;
#endif //UCONFIG_BIONIC_LIBC
	ret = 0;
out:
	return ret;
bad_cleanup_mmap:
	exit_mmap(mm);
bad_elf_cleanup_pgdir:
	put_pgdir(mm);
bad_pgdir_cleanup_mm:
	mm_destroy(mm);
bad_mm:
	goto out;
}
Example #5
File: proc.c, Project: liangchao1992/ucore
//#ifdef UCONFIG_BIONIC_LIBC
static int
map_ph(int fd, struct proghdr *ph, struct mm_struct *mm, uint32_t * pbias,
       uint32_t linker)
{
	int ret = 0;
	struct Page *page;
	uint32_t vm_flags = 0;
	uint32_t bias = 0;
	pte_perm_t perm = 0;
	ptep_set_u_read(&perm);

	if (ph->p_flags & ELF_PF_X)
		vm_flags |= VM_EXEC;
	if (ph->p_flags & ELF_PF_W)
		vm_flags |= VM_WRITE;
	if (ph->p_flags & ELF_PF_R)
		vm_flags |= VM_READ;

	if (vm_flags & VM_WRITE)
		ptep_set_u_write(&perm);

	if (pbias) {
		bias = *pbias;
	}
	if (!bias && !ph->p_va) {
		bias = get_unmapped_area(mm, ph->p_memsz + PGSIZE);
		bias = ROUNDUP(bias, PGSIZE);
		if (pbias)
			*pbias = bias;
	}

	if ((ret =
	     mm_map(mm, ph->p_va + bias, ph->p_memsz, vm_flags, NULL)) != 0) {
		goto bad_cleanup_mmap;
	}

	if (!linker && mm->brk_start < ph->p_va + bias + ph->p_memsz) {
		mm->brk_start = ph->p_va + bias + ph->p_memsz;
	}

	off_t offset = ph->p_offset;
	size_t off, size;
	uintptr_t start = ph->p_va + bias, end, la = ROUNDDOWN(start, PGSIZE);

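	/* copy the file-backed part of the segment (p_filesz bytes) into freshly allocated, mapped pages */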
	end = ph->p_va + bias + ph->p_filesz;
	while (start < end) {
		if ((page = pgdir_alloc_page(mm->pgdir, la, perm)) == NULL) {
			ret = -E_NO_MEM;
			goto bad_cleanup_mmap;
		}
		off = start - la, size = PGSIZE - off, la += PGSIZE;
		if (end < la) {
			size -= la - end;
		}
		if ((ret =
		     load_icode_read(fd, page2kva(page) + off, size,
				     offset)) != 0) {
			goto bad_cleanup_mmap;
		}
		start += size, offset += size;
	}

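	/* zero the remainder of the segment up to p_memsz, starting with the tail of the last partially filled page */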
	end = ph->p_va + bias + ph->p_memsz;

	if (start < la) {
		if (start == end) {
			goto normal_exit;
		}
		off = start + PGSIZE - la, size = PGSIZE - off;
		if (end < la) {
			size -= la - end;
		}
		memset(page2kva(page) + off, 0, size);
		start += size;
		assert((end < la && start == end)
		       || (end >= la && start == la));
	}

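	/* zero-fill any remaining whole pages (the rest of the BSS) */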
	while (start < end) {
		if ((page = pgdir_alloc_page(mm->pgdir, la, perm)) == NULL) {
			ret = -E_NO_MEM;
			goto bad_cleanup_mmap;
		}
		off = start - la, size = PGSIZE - off, la += PGSIZE;
		if (end < la) {
			size -= la - end;
		}
		memset(page2kva(page) + off, 0, size);
		start += size;
	}
normal_exit:
	return 0;
bad_cleanup_mmap:
	return ret;
}
Example #6
File: proc.c, Project: liangchao1992/ucore
int do_mprotect(void *addr, size_t len, int prot)
{

	/*
	   return 0; 
	 */

	struct mm_struct *mm = current->mm;
	assert(mm != NULL);
	if (len == 0) {
		return -E_INVAL;
	}
	uintptr_t start = ROUNDDOWN(addr, PGSIZE);
	uintptr_t end = ROUNDUP(addr + len, PGSIZE);

	int ret = -E_INVAL;
	lock_mm(mm);

	while (1) {
		struct vma_struct *vma = find_vma(mm, start);
		if (vma == NULL) {
			goto out;
		}
		uintptr_t last_end = vma->vm_end;
		if (vma->vm_start == start && vma->vm_end == end) {
			if (prot & PROT_WRITE) {
				vma->vm_flags |= VM_WRITE;
			} else {
				vma->vm_flags &= ~VM_WRITE;
			}
		} else {
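			/* partial overlap: remap only the intersecting range with the updated flags, keeping its pages and file backing */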
			uintptr_t this_end =
			    (end <= vma->vm_end) ? end : vma->vm_end;
			uintptr_t this_start =
			    (start >= vma->vm_start) ? start : vma->vm_start;

			struct mapped_file_struct mfile = vma->mfile;
			mfile.offset += this_start - vma->vm_start;
			uint32_t flags = vma->vm_flags;
			if ((ret =
			     mm_unmap_keep_pages(mm, this_start,
						 this_end - this_start)) != 0) {
				goto out;
			}
			if (prot & PROT_WRITE) {
				flags |= VM_WRITE;
			} else {
				flags &= ~VM_WRITE;
			}
			if ((ret =
			     mm_map(mm, this_start, this_end - this_start,
				    flags, &vma)) != 0) {
				goto out;
			}
			vma->mfile = mfile;
			if (vma->mfile.file != NULL) {
				filemap_acquire(mfile.file);
			}
		}

		ret = 0;

		if (end <= last_end)
			break;
		start = last_end;
	}

out:
	unlock_mm(mm);
	return ret;
}