static int open_proc_mm(struct inode *inode, struct file *file)
{
	struct mm_struct *mm = mm_alloc();
	int ret;

	ret = -ENOMEM;
	if (mm == NULL)
		goto out_mem;

	ret = init_new_context(current, mm);
	if (ret)
		goto out_free;

	spin_lock(&mmlist_lock);
	list_add(&mm->mmlist, &current->mm->mmlist);
	mmlist_nr++;
	spin_unlock(&mmlist_lock);

	file->private_data = mm;

	return 0;

 out_free:
	mmput(mm);
 out_mem:
	return ret;
}
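For completeness: a file that publishes an mm this way would drop its reference when the descriptor is closed. A hypothetical matching release handler, assuming the usual mmput() pairing for the reference taken by mm_alloc() (a sketch, not part of the example above):

static int release_proc_mm(struct inode *inode, struct file *file)
{
	struct mm_struct *mm = file->private_data;

	/* drop the mm_users reference taken by mm_alloc() in open_proc_mm() */
	mmput(mm);
	return 0;
}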
Example #2
/* called from main before smp_init() */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	init_new_context(current, &init_mm);
	current_thread_info()->cpu = 0;
	smp_tune_scheduling();
	plat_prepare_cpus(max_cpus);
}
Example #3
/*
 * Allocate a new mm structure and copy contents from the
 * mm structure of the passed in task structure.
 */
struct mm_struct *dup_mm(struct task_struct *tsk)
{
	struct mm_struct *mm, *oldmm = current->mm;
	int err;

	if (!oldmm)
		return NULL;

	mm = allocate_mm();
	if (!mm)
		goto fail_nomem;

	memcpy(mm, oldmm, sizeof(*mm));
	mm_init_cpumask(mm);

	/* Initializing for Swap token stuff */
	mm->token_priority = 0;
	mm->last_interval = 0;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	mm->pmd_huge_pte = NULL;
#endif

	if (!mm_init(mm, tsk))
		goto fail_nomem;

	if (init_new_context(tsk, mm))
		goto fail_nocontext;

	dup_mm_exe_file(oldmm, mm);

	err = dup_mmap(mm, oldmm);
	if (err)
		goto free_pt;

	mm->hiwater_rss = get_mm_rss(mm);
	mm->hiwater_vm = mm->total_vm;

	if (mm->binfmt && !try_module_get(mm->binfmt->module))
		goto free_pt;

	return mm;

free_pt:
	/* don't put binfmt in mmput, we haven't got module yet */
	mm->binfmt = NULL;
	mmput(mm);

fail_nomem:
	return NULL;

fail_nocontext:
	/*
	 * If init_new_context() failed, we cannot use mmput() to free the mm
	 * because it calls destroy_context()
	 */
	mm_free_pgd(mm);
	free_mm(mm);
	return NULL;
}
Example #4
/*
 * Create a new mm_struct and populate it with a temporary stack
 * vm_area_struct.  We don't have enough context at this point to set the stack
 * flags, permissions, and offset, so we use temporary values.  We'll update
 * them later in setup_arg_pages().
 */
int bprm_mm_init(struct linux_binprm *bprm)
{
	int err;
	struct mm_struct *mm = NULL;

	bprm->mm = mm = mm_alloc();
	err = -ENOMEM;
	if (!mm)
		goto err;

	err = init_new_context(current, mm);
	if (err)
		goto err;

	err = __bprm_mm_init(bprm);
	if (err)
		goto err;

	return 0;

err:
	if (mm) {
		bprm->mm = NULL;
		mmdrop(mm);
	}

	return err;
}
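The temporary stack described in the comment above is created by __bprm_mm_init(). A hedged sketch of that helper, modeled on fs/exec.c of the same era (exact flags and field names vary across kernel versions):

static int __bprm_mm_init(struct linux_binprm *bprm)
{
	int err;
	struct vm_area_struct *vma = NULL;
	struct mm_struct *mm = bprm->mm;

	bprm->vma = vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
	if (!vma)
		return -ENOMEM;

	down_write(&mm->mmap_sem);
	vma->vm_mm = mm;

	/*
	 * Park one stack page at the highest possible address;
	 * setup_arg_pages() relocates it and sets the real flags,
	 * permissions, and offset later.
	 */
	vma->vm_end = STACK_TOP_MAX;
	vma->vm_start = vma->vm_end - PAGE_SIZE;
	vma->vm_flags = VM_STACK_FLAGS;
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);

	err = insert_vm_struct(mm, vma);
	if (err)
		goto err;

	mm->stack_vm = mm->total_vm = 1;
	up_write(&mm->mmap_sem);
	bprm->p = vma->vm_end - sizeof(void *);
	return 0;
err:
	up_write(&mm->mmap_sem);
	bprm->vma = NULL;
	kmem_cache_free(vm_area_cachep, vma);
	return err;
}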
Example #5
/* called from main before smp_init() */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	init_new_context(current, &init_mm);
	current_thread_info()->cpu = 0;
	plat_prepare_cpus(max_cpus);
#ifndef CONFIG_HOTPLUG_CPU
	cpu_present_map = cpu_possible_map;
#endif
}
Example #6
struct mm_struct *dup_mm(struct task_struct *tsk)
{
	struct mm_struct *mm, *oldmm = current->mm;
	int err;

	if (!oldmm)
		return NULL;

	mm = allocate_mm();
	if (!mm)
		goto fail_nomem;

	memcpy(mm, oldmm, sizeof(*mm));
	mm_init_cpumask(mm);

	/* Initializing for Swap token stuff */
	mm->token_priority = 0;
	mm->last_interval = 0;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	mm->pmd_huge_pte = NULL;
#endif

	if (!mm_init(mm, tsk))
		goto fail_nomem;

	if (init_new_context(tsk, mm))
		goto fail_nocontext;

	dup_mm_exe_file(oldmm, mm);

	err = dup_mmap(mm, oldmm);
	if (err)
		goto free_pt;

	mm->hiwater_rss = get_mm_rss(mm);
	mm->hiwater_vm = mm->total_vm;

	if (mm->binfmt && !try_module_get(mm->binfmt->module))
		goto free_pt;

	return mm;

free_pt:
	/* don't put binfmt in mmput, we haven't got module yet */
	mm->binfmt = NULL;
	mmput(mm);

fail_nomem:
	return NULL;

fail_nocontext:
	/*
	 * If init_new_context() failed, we cannot use mmput() to free the mm
	 * because it calls destroy_context()
	 */
	mm_free_pgd(mm);
	free_mm(mm);
	return NULL;
}
Example #7
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int cpu = smp_processor_id();

	init_new_context(current, &init_mm);
	current_thread_info()->cpu = cpu;
	mp_ops->prepare_cpus(max_cpus);

#ifndef CONFIG_HOTPLUG_CPU
	init_cpu_present(cpu_possible_mask);
#endif
}
Example #8
/*
 * Allocate a new mm structure and copy contents from the
 * mm structure of the passed in task structure.
 */
struct mm_struct *dup_mm(struct task_struct *tsk)
{
	struct mm_struct *mm, *oldmm = current->mm;
	int err;

	if (!oldmm)
		return NULL;

	mm = allocate_mm();
	if (!mm)
		goto fail_nomem;

	memcpy(mm, oldmm, sizeof(*mm));

	/* Initializing for Swap token stuff */
	mm->token_priority = 0;
	mm->last_interval = 0;

	if (!mm_init(mm, tsk))
		goto fail_nomem;

	if (init_new_context(tsk, mm))
		goto fail_nocontext;

	dup_mm_exe_file(oldmm, mm);

	err = dup_mmap(mm, oldmm);
	if (err)
		goto free_pt;

	mm->hiwater_rss = get_mm_rss(mm);
	mm->hiwater_vm = mm->total_vm;

	return mm;

free_pt:
	mmput(mm);

fail_nomem:
	return NULL;

fail_nocontext:
	/*
	 * If init_new_context() failed, we cannot use mmput() to free the mm
	 * because it calls destroy_context()
	 */
	mm_free_pgd(mm);
	free_mm(mm);
	return NULL;
}
Example #9
static bool __init efi_virtmap_init(void)
{
	efi_memory_desc_t *md;
	bool systab_found;

	efi_mm.pgd = pgd_alloc(&efi_mm);
	init_new_context(NULL, &efi_mm);

	systab_found = false;
	for_each_efi_memory_desc(md) {
		phys_addr_t phys = md->phys_addr;
		int ret;

		if (!(md->attribute & EFI_MEMORY_RUNTIME))
			continue;
		if (md->virt_addr == 0)
			return false;

		ret = efi_create_mapping(&efi_mm, md);
		if (!ret) {
			pr_info("  EFI remap %pa => %p\n",
				&phys, (void *)(unsigned long)md->virt_addr);
		} else {
			pr_warn("  EFI remap %pa: failed to create mapping (%d)\n",
				&phys, ret);
			return false;
		}
		/*
		 * If this entry covers the address of the UEFI system table,
		 * calculate and record its virtual address.
		 */
		if (efi_system_table >= phys &&
		    efi_system_table < phys + (md->num_pages * EFI_PAGE_SIZE)) {
			efi.systab = (void *)(unsigned long)(efi_system_table -
							     phys + md->virt_addr);
			systab_found = true;
		}
	}
	if (!systab_found) {
		pr_err("No virtual mapping found for the UEFI System Table\n");
		return false;
	}

	if (efi_memattr_apply_permissions(&efi_mm, efi_set_mapping_permissions))
		return false;

	return true;
}
Example #10
static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p)
{
	mm->mmap = NULL;
	mm->mm_rb = RB_ROOT;
	mm->vmacache_seqnum = 0;
	atomic_set(&mm->mm_users, 1);
	atomic_set(&mm->mm_count, 1);
	init_rwsem(&mm->mmap_sem);
	INIT_LIST_HEAD(&mm->mmlist);
	mm->core_state = NULL;
	atomic_long_set(&mm->nr_ptes, 0);
	mm_nr_pmds_init(mm);
	mm->map_count = 0;
	mm->locked_vm = 0;
	mm->pinned_vm = 0;
	memset(&mm->rss_stat, 0, sizeof(mm->rss_stat));
	spin_lock_init(&mm->page_table_lock);
	mm_init_cpumask(mm);
	mm_init_aio(mm);
	mm_init_owner(mm, p);
	mmu_notifier_mm_init(mm);
	clear_tlb_flush_pending(mm);
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
	mm->pmd_huge_pte = NULL;
#endif

	if (current->mm) {
		mm->flags = current->mm->flags & MMF_INIT_MASK;
		mm->def_flags = current->mm->def_flags & VM_INIT_DEF_MASK;
	} else {
		mm->flags = default_dump_filter;
		mm->def_flags = 0;
	}

	if (mm_alloc_pgd(mm))
		goto fail_nopgd;

	if (init_new_context(p, mm))
		goto fail_nocontext;

	return mm;

fail_nocontext:
	mm_free_pgd(mm);
fail_nopgd:
	free_mm(mm);
	return NULL;
}
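Every caller above checks init_new_context() for failure, but on many architectures it cannot fail: with no per-mm MMU state to set up, the hook is a success stub. A minimal sketch of such a no-op definition, as typically found in an architecture's asm/mmu_context.h (illustrative, not taken from any one port):

static inline int init_new_context(struct task_struct *tsk,
				   struct mm_struct *mm)
{
	/* no architecture-specific context to initialize; never fails */
	return 0;
}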
Example #11
/*
 * paging_init() continues the virtual memory environment setup which
 * was begun by the code in arch/head.S.
 * The parameters are pointers to where to stick the starting and ending
 * addresses  of available kernel virtual memory.
 */
void __init paging_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0};

	/* allocate some pages for kernel housekeeping tasks */
	empty_bad_page_table	= (unsigned long) alloc_bootmem_pages(PAGE_SIZE);
	empty_bad_page		= (unsigned long) alloc_bootmem_pages(PAGE_SIZE);
	empty_zero_page		= (unsigned long) alloc_bootmem_pages(PAGE_SIZE);

	memset((void *) empty_zero_page, 0, PAGE_SIZE);

#ifdef CONFIG_HIGHMEM
	if (num_physpages - num_mappedpages) {
		pgd_t *pge;
		pud_t *pue;
		pmd_t *pme;

		pkmap_page_table = alloc_bootmem_pages(PAGE_SIZE);

		memset(pkmap_page_table, 0, PAGE_SIZE);

		pge = swapper_pg_dir + pgd_index_k(PKMAP_BASE);
		pue = pud_offset(pge, PKMAP_BASE);
		pme = pmd_offset(pue, PKMAP_BASE);
		__set_pmd(pme, virt_to_phys(pkmap_page_table) | _PAGE_TABLE);
	}
#endif

	/* distribute the allocatable pages across the various zones and
	 * pass them to the allocator
	 */
	zones_size[ZONE_DMA]     = max_low_pfn - min_low_pfn;
	zones_size[ZONE_NORMAL]  = 0;
#ifdef CONFIG_HIGHMEM
	zones_size[ZONE_HIGHMEM] = num_physpages - num_mappedpages;
#endif

	free_area_init(zones_size);

#ifdef CONFIG_MMU
	/* initialise init's MMU context */
	init_new_context(&init_task, &init_mm);
#endif

} /* end paging_init() */
Example #12
/*
 * Allocate a new mm structure and copy contents from the
 * mm structure of the passed in task structure.
 */
static struct mm_struct *dup_mm(struct task_struct *tsk)
{
	struct mm_struct *mm, *oldmm = current->mm;
	int err;

	if (!oldmm)
		return NULL;

	mm = allocate_mm();
	if (!mm)
		goto fail_nomem;

	memcpy(mm, oldmm, sizeof(*mm));

	if (!mm_init(mm))
		goto fail_nomem;

	if (init_new_context(tsk, mm))
		goto fail_nocontext;

	err = dup_mmap(mm, oldmm);
	if (err)
		goto free_pt;

	mm->hiwater_rss = get_mm_rss(mm);
	mm->hiwater_vm = mm->total_vm;

	return mm;

free_pt:
	mmput(mm);

fail_nomem:
	return NULL;

fail_nocontext:
	/*
	 * If init_new_context() failed, we cannot use mmput() to free the mm
	 * because it calls destroy_context()
	 */
	mm_free_pgd(mm);
	free_mm(mm);
	return NULL;
}
Example #13
/*
 * We need our own copy of the higher levels of the page tables
 * because we want to avoid inserting EFI region mappings (EFI_VA_END
 * to EFI_VA_START) into the standard kernel page tables. Everything
 * else can be shared, see efi_sync_low_kernel_mappings().
 *
 * We don't want the pgd on the pgd_list and cannot use pgd_alloc() for the
 * allocation.
 */
int __init efi_alloc_page_tables(void)
{
	pgd_t *pgd, *efi_pgd;
	p4d_t *p4d;
	pud_t *pud;
	gfp_t gfp_mask;

	if (efi_enabled(EFI_OLD_MEMMAP))
		return 0;

	gfp_mask = GFP_KERNEL | __GFP_ZERO;
	efi_pgd = (pgd_t *)__get_free_pages(gfp_mask, PGD_ALLOCATION_ORDER);
	if (!efi_pgd)
		return -ENOMEM;

	pgd = efi_pgd + pgd_index(EFI_VA_END);
	p4d = p4d_alloc(&init_mm, pgd, EFI_VA_END);
	if (!p4d) {
		free_page((unsigned long)efi_pgd);
		return -ENOMEM;
	}

	pud = pud_alloc(&init_mm, p4d, EFI_VA_END);
	if (!pud) {
		if (pgtable_l5_enabled)
			free_page((unsigned long) pgd_page_vaddr(*pgd));
		free_pages((unsigned long)efi_pgd, PGD_ALLOCATION_ORDER);
		return -ENOMEM;
	}

	efi_mm.pgd = efi_pgd;
	mm_init_cpumask(&efi_mm);
	init_new_context(NULL, &efi_mm);

	return 0;
}
Example #14
static int copy_mm(unsigned long clone_flags, struct task_struct * tsk)
{
	struct mm_struct * mm, *oldmm;
	int retval;

	tsk->min_flt = tsk->maj_flt = 0;
	tsk->nvcsw = tsk->nivcsw = 0;

	tsk->mm = NULL;
	tsk->active_mm = NULL;

	/*
	 * Are we cloning a kernel thread?
	 *
	 * We need to steal an active VM for that.
	 */
	oldmm = current->mm;
	if (!oldmm)
		return 0;

	if (clone_flags & CLONE_VM) {
		atomic_inc(&oldmm->mm_users);
		mm = oldmm;
		/*
		 * There are cases where the PTL is held to ensure no
		 * new threads start up in user mode using an mm, which
		 * allows optimizing out ipis; the tlb_gather_mmu code
		 * is an example.
		 */
		spin_unlock_wait(&oldmm->page_table_lock);
		goto good_mm;
	}

	retval = -ENOMEM;
	mm = allocate_mm();
	if (!mm)
		goto fail_nomem;

	/* Copy the current MM stuff.. */
	memcpy(mm, oldmm, sizeof(*mm));
	if (!mm_init(mm))
		goto fail_nomem;

	if (init_new_context(tsk, mm))
		goto fail_nocontext;

	retval = dup_mmap(mm, oldmm);
	if (retval)
		goto free_pt;

	mm->hiwater_rss = mm->rss;
	mm->hiwater_vm = mm->total_vm;

good_mm:
	tsk->mm = mm;
	tsk->active_mm = mm;
	return 0;

free_pt:
	mmput(mm);
fail_nomem:
	return retval;

fail_nocontext:
	/*
	 * If init_new_context() failed, we cannot use mmput() to free the mm
	 * because it calls destroy_context()
	 */
	mm_free_pgd(mm);
	free_mm(mm);
	return retval;
}
Example #15
static int load_icode(int fd, int argc, char **kargv, int envc, char **kenvp)
{
	assert(argc >= 0 && argc <= EXEC_MAX_ARG_NUM);
	assert(envc >= 0 && envc <= EXEC_MAX_ENV_NUM);
	if (current->mm != NULL) {
		panic("load_icode: current->mm must be empty.\n");
	}

	int ret = -E_NO_MEM;

//#ifdef UCONFIG_BIONIC_LIBC
	uint32_t real_entry;
//#endif //UCONFIG_BIONIC_LIBC

	struct mm_struct *mm;
	if ((mm = mm_create()) == NULL) {
		goto bad_mm;
	}

	if (setup_pgdir(mm) != 0) {
		goto bad_pgdir_cleanup_mm;
	}

	mm->brk_start = 0;

	struct Page *page;

	struct elfhdr __elf, *elf = &__elf;
	if ((ret = load_icode_read(fd, elf, sizeof(struct elfhdr), 0)) != 0) {
		goto bad_elf_cleanup_pgdir;
	}

	if (elf->e_magic != ELF_MAGIC) {
		ret = -E_INVAL_ELF;
		goto bad_elf_cleanup_pgdir;
	}
//#ifdef UCONFIG_BIONIC_LIBC
	real_entry = elf->e_entry;

	uint32_t load_address, load_address_flag = 0;
//#endif //UCONFIG_BIONIC_LIBC

	struct proghdr __ph, *ph = &__ph;
	uint32_t vm_flags, phnum;
	pte_perm_t perm = 0;

//#ifdef UCONFIG_BIONIC_LIBC
	uint32_t is_dynamic = 0, interp_idx;
	uint32_t bias = 0;
//#endif //UCONFIG_BIONIC_LIBC
	for (phnum = 0; phnum < elf->e_phnum; phnum++) {
		off_t phoff = elf->e_phoff + sizeof(struct proghdr) * phnum;
		if ((ret =
		     load_icode_read(fd, ph, sizeof(struct proghdr),
				     phoff)) != 0) {
			goto bad_cleanup_mmap;
		}

		if (ph->p_type == ELF_PT_INTERP) {
			is_dynamic = 1;
			interp_idx = phnum;
			continue;
		}

		if (ph->p_type != ELF_PT_LOAD) {
			continue;
		}
		if (ph->p_filesz > ph->p_memsz) {
			ret = -E_INVAL_ELF;
			goto bad_cleanup_mmap;
		}

		if (ph->p_va == 0 && !bias) {
			bias = 0x00008000;
		}

		if ((ret = map_ph(fd, ph, mm, &bias, 0)) != 0) {
			kprintf("load address: 0x%08x size: %d\n", ph->p_va,
				ph->p_memsz);
			goto bad_cleanup_mmap;
		}

		if (load_address_flag == 0)
			load_address = ph->p_va + bias;
		++load_address_flag;

	  /*********************************************/
		/*
		   vm_flags = 0;
		   ptep_set_u_read(&perm);
		   if (ph->p_flags & ELF_PF_X) vm_flags |= VM_EXEC;
		   if (ph->p_flags & ELF_PF_W) vm_flags |= VM_WRITE;
		   if (ph->p_flags & ELF_PF_R) vm_flags |= VM_READ;
		   if (vm_flags & VM_WRITE) ptep_set_u_write(&perm);

		   if ((ret = mm_map(mm, ph->p_va, ph->p_memsz, vm_flags, NULL)) != 0) {
		   goto bad_cleanup_mmap;
		   }

		   if (mm->brk_start < ph->p_va + ph->p_memsz) {
		   mm->brk_start = ph->p_va + ph->p_memsz;
		   }

		   off_t offset = ph->p_offset;
		   size_t off, size;
		   uintptr_t start = ph->p_va, end, la = ROUNDDOWN(start, PGSIZE);

		   end = ph->p_va + ph->p_filesz;
		   while (start < end) {
		   if ((page = pgdir_alloc_page(mm->pgdir, la, perm)) == NULL) {
		   ret = -E_NO_MEM;
		   goto bad_cleanup_mmap;
		   }
		   off = start - la, size = PGSIZE - off, la += PGSIZE;
		   if (end < la) {
		   size -= la - end;
		   }
		   if ((ret = load_icode_read(fd, page2kva(page) + off, size, offset)) != 0) {
		   goto bad_cleanup_mmap;
		   }
		   start += size, offset += size;
		   }

		   end = ph->p_va + ph->p_memsz;

		   if (start < la) {
		   // ph->p_memsz == ph->p_filesz 
		   if (start == end) {
		   continue ;
		   }
		   off = start + PGSIZE - la, size = PGSIZE - off;
		   if (end < la) {
		   size -= la - end;
		   }
		   memset(page2kva(page) + off, 0, size);
		   start += size;
		   assert((end < la && start == end) || (end >= la && start == la));
		   }

		   while (start < end) {
		   if ((page = pgdir_alloc_page(mm->pgdir, la, perm)) == NULL) {
		   ret = -E_NO_MEM;
		   goto bad_cleanup_mmap;
		   }
		   off = start - la, size = PGSIZE - off, la += PGSIZE;
		   if (end < la) {
		   size -= la - end;
		   }
		   memset(page2kva(page) + off, 0, size);
		   start += size;
		   }
		 */
	  /**************************************/
	}

	mm->brk_start = mm->brk = ROUNDUP(mm->brk_start, PGSIZE);

	/* setup user stack */
	vm_flags = VM_READ | VM_WRITE | VM_STACK;
	if ((ret =
	     mm_map(mm, USTACKTOP - USTACKSIZE, USTACKSIZE, vm_flags,
		    NULL)) != 0) {
		goto bad_cleanup_mmap;
	}

	if (is_dynamic) {
		elf->e_entry += bias;

		bias = 0;

		off_t phoff =
		    elf->e_phoff + sizeof(struct proghdr) * interp_idx;
		if ((ret =
		     load_icode_read(fd, ph, sizeof(struct proghdr),
				     phoff)) != 0) {
			goto bad_cleanup_mmap;
		}

		char *interp_path = (char *)kmalloc(ph->p_filesz);
		load_icode_read(fd, interp_path, ph->p_filesz, ph->p_offset);

		int interp_fd = sysfile_open(interp_path, O_RDONLY);
		assert(interp_fd >= 0);
		struct elfhdr interp___elf, *interp_elf = &interp___elf;
		assert((ret =
			load_icode_read(interp_fd, interp_elf,
					sizeof(struct elfhdr), 0)) == 0);
		assert(interp_elf->e_magic == ELF_MAGIC);

		struct proghdr interp___ph, *interp_ph = &interp___ph;
		uint32_t interp_phnum;
		uint32_t va_min = 0xffffffff, va_max = 0;
		for (interp_phnum = 0; interp_phnum < interp_elf->e_phnum;
		     ++interp_phnum) {
			off_t interp_phoff =
			    interp_elf->e_phoff +
			    sizeof(struct proghdr) * interp_phnum;
			assert((ret =
				load_icode_read(interp_fd, interp_ph,
						sizeof(struct proghdr),
						interp_phoff)) == 0);
			if (interp_ph->p_type != ELF_PT_LOAD) {
				continue;
			}
			if (va_min > interp_ph->p_va)
				va_min = interp_ph->p_va;
			if (va_max < interp_ph->p_va + interp_ph->p_memsz)
				va_max = interp_ph->p_va + interp_ph->p_memsz;
		}

		bias = get_unmapped_area(mm, va_max - va_min + 1 + PGSIZE);
		bias = ROUNDUP(bias, PGSIZE);

		for (interp_phnum = 0; interp_phnum < interp_elf->e_phnum;
		     ++interp_phnum) {
			off_t interp_phoff =
			    interp_elf->e_phoff +
			    sizeof(struct proghdr) * interp_phnum;
			assert((ret =
				load_icode_read(interp_fd, interp_ph,
						sizeof(struct proghdr),
						interp_phoff)) == 0);
			if (interp_ph->p_type != ELF_PT_LOAD) {
				continue;
			}
			assert((ret =
				map_ph(interp_fd, interp_ph, mm, &bias,
				       1)) == 0);
		}

		real_entry = interp_elf->e_entry + bias;

		sysfile_close(interp_fd);
		kfree(interp_path);
	}

	sysfile_close(fd);

	bool intr_flag;
	local_intr_save(intr_flag);
	{
		list_add(&(proc_mm_list), &(mm->proc_mm_link));
	}
	local_intr_restore(intr_flag);
	mm_count_inc(mm);
	current->mm = mm;
	set_pgdir(current, mm->pgdir);
	mm->cpuid = myid();
	mp_set_mm_pagetable(mm);

	if (!is_dynamic) {
		real_entry += bias;
	}
#ifdef UCONFIG_BIONIC_LIBC
	if (init_new_context_dynamic(current, elf, argc, kargv, envc, kenvp,
				     is_dynamic, real_entry, load_address,
				     bias) < 0)
		goto bad_cleanup_mmap;
#else
	if (init_new_context(current, elf, argc, kargv, envc, kenvp) < 0)
		goto bad_cleanup_mmap;
#endif //UCONFIG_BIONIC_LIBC
	ret = 0;
out:
	return ret;
bad_cleanup_mmap:
	exit_mmap(mm);
bad_elf_cleanup_pgdir:
	put_pgdir(mm);
bad_pgdir_cleanup_mm:
	mm_destroy(mm);
bad_mm:
	goto out;
}
Example #16
/*
 * Allocate a new mm structure and copy contents from the
 * mm structure of the passed in task structure.
 */
struct mm_struct *dup_mm(struct task_struct *tsk)
{
	struct mm_struct *mm, *oldmm = current->mm;
	int err;

	if (!oldmm)
		return NULL;

	mm = allocate_mm();
	if (!mm)
		goto fail_nomem;

	memcpy(mm, oldmm, sizeof(*mm));
	mm_init_cpumask(mm);

	/* Initializing for Swap token stuff */
	mm->token_priority = 0;
	mm->last_interval = 0;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	mm->pmd_huge_pte = NULL;
#endif
	uprobe_reset_state(mm);

	if (!mm_init(mm, tsk))
		goto fail_nomem;

	if (init_new_context(tsk, mm))
		goto fail_nocontext;

	dup_mm_exe_file(oldmm, mm);

	err = dup_mmap(mm, oldmm);
	if (err)
		goto free_pt;

#ifdef CONFIG_HOMECACHE
	{
		/* Reset vm_pid on all vmas.  In the new mm_struct, we
		 * want to switch anything that was associated with
		 * the parent to be associated with the child, and
		 * clear everything else.
		 */
		struct vm_area_struct *mpnt;
		down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING);
		for (mpnt = mm->mmap; mpnt; mpnt = mpnt->vm_next) {
			if (mpnt->vm_pid == current->pid)
				mpnt->vm_pid = tsk->pid;
			else
				mpnt->vm_pid = 0;
		}
		up_write(&mm->mmap_sem);
	}
#endif

	mm->hiwater_rss = get_mm_rss(mm);
	mm->hiwater_vm = mm->total_vm;

	if (mm->binfmt && !try_module_get(mm->binfmt->module))
		goto free_pt;

	return mm;

free_pt:
	/* don't put binfmt in mmput, we haven't got module yet */
	mm->binfmt = NULL;
	mmput(mm);

fail_nomem:
	return NULL;

fail_nocontext:
	/*
	 * If init_new_context() failed, we cannot use mmput() to free the mm
	 * because it calls destroy_context()
	 */
	mm_free_pgd(mm);
	free_mm(mm);
	return NULL;
}
Example #17
void __init smp_boot_cpus(void)
{
	int i;

	smp_num_cpus = prom_setup_smp();
	init_new_context(current, &init_mm);
	current->processor = 0;
	atomic_set(&cpus_booted, 1);  /* Master CPU is already booted... */
	init_idle();
	for (i = 1; i < smp_num_cpus; i++) {
		struct task_struct *p;
		struct pt_regs regs;
		printk("Starting CPU %d... ", i);

		/* Spawn a new process normally.  Grab a pointer to
		   its task struct so we can mess with it */
		do_fork(CLONE_VM|CLONE_PID, 0, &regs, 0);
		p = init_task.prev_task;

		/* Schedule the first task manually */
		p->processor = i;
		p->cpus_runnable = 1 << i; /* we schedule the first task manually */

		/* Attach to the address space of init_task. */
		atomic_inc(&init_mm.mm_count);
		p->active_mm = &init_mm;
		init_tasks[i] = p;

		del_from_runqueue(p);
		unhash_process(p);

		prom_boot_secondary(i,
				    (unsigned long)p + KERNEL_STACK_SIZE - 32,
				    (unsigned long)p);

#if 0

		/* This is copied from the ip-27 code in the mips64 tree */

		struct task_struct *p;

		/*
		 * The following code is purely to make sure
		 * Linux can schedule processes on this slave.
		 */
		kernel_thread(0, NULL, CLONE_PID);
		p = init_task.prev_task;
		sprintf(p->comm, "%s%d", "Idle", i);
		init_tasks[i] = p;
		p->processor = i;
		p->cpus_runnable = 1 << i; /* we schedule the first task manually */
		del_from_runqueue(p);
		unhash_process(p);
		/* Attach to the address space of init_task. */
		atomic_inc(&init_mm.mm_count);
		p->active_mm = &init_mm;
		prom_boot_secondary(i, 
				    (unsigned long)p + KERNEL_STACK_SIZE - 32,
				    (unsigned long)p);
#endif
	}

	/* Wait for everyone to come up */
	while (atomic_read(&cpus_booted) != smp_num_cpus);
}
Example #18
static int
load_icode(int fd, int argc, char **kargv, int envc, char **kenvp) {
    assert(argc >= 0 && argc <= EXEC_MAX_ARG_NUM);
    assert(envc >= 0 && envc <= EXEC_MAX_ENV_NUM);
    if (current->mm != NULL) {
        panic("load_icode: current->mm must be empty.\n");
    }

    int ret = -E_NO_MEM;

    struct mm_struct *mm;
    if ((mm = mm_create()) == NULL) {
        goto bad_mm;
    }

    if (setup_pgdir(mm) != 0) {
        goto bad_pgdir_cleanup_mm;
    }

    mm->brk_start = 0;

    struct Page *page;

    struct elfhdr __elf, *elf = &__elf;
    if ((ret = load_icode_read(fd, elf, sizeof(struct elfhdr), 0)) != 0) {
        goto bad_elf_cleanup_pgdir;
    }
	
    if (elf->e_magic != ELF_MAGIC) {
        ret = -E_INVAL_ELF;
        goto bad_elf_cleanup_pgdir;
    }

    struct proghdr __ph, *ph = &__ph;
    uint32_t vm_flags, phnum;
    pte_perm_t perm = 0;
    for (phnum = 0; phnum < elf->e_phnum; phnum ++) {
      off_t phoff = elf->e_phoff + sizeof(struct proghdr) * phnum;
      if ((ret = load_icode_read(fd, ph, sizeof(struct proghdr), phoff)) != 0) {
        goto bad_cleanup_mmap;
      }
      if (ph->p_type != ELF_PT_LOAD) {
        continue ;
      }
      if (ph->p_filesz > ph->p_memsz) {
        ret = -E_INVAL_ELF;
        goto bad_cleanup_mmap;
      }
      vm_flags = 0;
      ptep_set_u_read(&perm);
      if (ph->p_flags & ELF_PF_X) vm_flags |= VM_EXEC;
      if (ph->p_flags & ELF_PF_W) vm_flags |= VM_WRITE;
      if (ph->p_flags & ELF_PF_R) vm_flags |= VM_READ;
      if (vm_flags & VM_WRITE) ptep_set_u_write(&perm);

      if ((ret = mm_map(mm, ph->p_va, ph->p_memsz, vm_flags, NULL)) != 0) {
        goto bad_cleanup_mmap;
      }

      if (mm->brk_start < ph->p_va + ph->p_memsz) {
        mm->brk_start = ph->p_va + ph->p_memsz;
      }

      off_t offset = ph->p_offset;
      size_t off, size;
      uintptr_t start = ph->p_va, end, la = ROUNDDOWN(start, PGSIZE);

      end = ph->p_va + ph->p_filesz;
      while (start < end) {
        if ((page = pgdir_alloc_page(mm->pgdir, la, perm)) == NULL) {
          ret = -E_NO_MEM;
          goto bad_cleanup_mmap;
        }
        off = start - la, size = PGSIZE - off, la += PGSIZE;
        if (end < la) {
          size -= la - end;
        }
        if ((ret = load_icode_read(fd, page2kva(page) + off, size, offset)) != 0) {
          goto bad_cleanup_mmap;
        }
        start += size, offset += size;
      }

      end = ph->p_va + ph->p_memsz;

      if (start < la) {
        /* ph->p_memsz == ph->p_filesz */
        if (start == end) {
          continue ;
        }
        off = start + PGSIZE - la, size = PGSIZE - off;
        if (end < la) {
          size -= la - end;
        }
        memset(page2kva(page) + off, 0, size);
        start += size;
        assert((end < la && start == end) || (end >= la && start == la));
      }

      while (start < end) {
        if ((page = pgdir_alloc_page(mm->pgdir, la, perm)) == NULL) {
          ret = -E_NO_MEM;
          goto bad_cleanup_mmap;
        }
        off = start - la, size = PGSIZE - off, la += PGSIZE;
        if (end < la) {
          size -= la - end;
        }
        memset(page2kva(page) + off, 0, size);
        start += size;
      }
    }
    sysfile_close(fd);

    mm->brk_start = mm->brk = ROUNDUP(mm->brk_start, PGSIZE);

    /* setup user stack */
    vm_flags = VM_READ | VM_WRITE | VM_STACK;
    if ((ret = mm_map(mm, USTACKTOP - USTACKSIZE, USTACKSIZE, vm_flags, NULL)) != 0) {
      goto bad_cleanup_mmap;
    }

    bool intr_flag;
    local_intr_save(intr_flag);
    {
      list_add(&(proc_mm_list), &(mm->proc_mm_link));
    }
    local_intr_restore(intr_flag);
    mm_count_inc(mm);
    current->mm = mm;
    set_pgdir(current, mm->pgdir);
    mm->lapic = pls_read(lapic_id);
    mp_set_mm_pagetable(mm);

    if (init_new_context(current, elf, argc, kargv, envc, kenvp) < 0)
      goto bad_cleanup_mmap;

    ret = 0;
out:
    return ret;
bad_cleanup_mmap:
    exit_mmap(mm);
bad_elf_cleanup_pgdir:
    put_pgdir(mm);
bad_pgdir_cleanup_mm:
    mm_destroy(mm);
bad_mm:
    goto out;
}
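Taken together, the examples follow one unwind rule: before init_new_context() has succeeded, the mm must be torn down by hand (mm_free_pgd() plus free_mm()); after it has succeeded, cleanup goes through mmput(), which is what calls destroy_context(). A condensed illustration of that pattern, reusing the helper names from the kernel examples above (a sketch, not kernel source):

struct mm_struct *example_new_mm(struct task_struct *tsk)
{
	struct mm_struct *mm = allocate_mm();

	if (!mm)
		return NULL;

	if (!mm_init(mm, tsk))		/* mm_init() frees mm on failure */
		return NULL;

	if (init_new_context(tsk, mm)) {
		/* too early for mmput(): destroy_context() would run
		 * against a context that was never created */
		mm_free_pgd(mm);
		free_mm(mm);
		return NULL;
	}

	return mm;	/* from here on, release with mmput() */
}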