Example #1
0
load_return_t
load_machfile(
	struct image_params	*imgp,
	struct mach_header	*header,
	thread_t 		thread,
	vm_map_t 		new_map,
	load_result_t		*result
)
{
	struct vnode		*vp = imgp->ip_vp;
	off_t			file_offset = imgp->ip_arch_offset;
	off_t			macho_size = imgp->ip_arch_size;
	off_t			file_size = imgp->ip_vattr->va_data_size;
	
	pmap_t			pmap = 0;	/* protected by create_map */
	vm_map_t		map;
	vm_map_t		old_map;
	task_t			old_task = TASK_NULL; /* protected by create_map */
	load_result_t		myresult;
	load_return_t		lret;
	boolean_t create_map = FALSE;
	int spawn = (imgp->ip_flags & IMGPF_SPAWN);
	task_t task = current_task();
	proc_t p = current_proc();
	mach_vm_offset_t	aslr_offset = 0;
	kern_return_t 		kret;

	/*
	 * The Mach-O slice must fit inside the backing file; a slice that
	 * claims to extend past EOF is malformed (or an attack).
	 */
	if (macho_size > file_size) {
		return(LOAD_BADMACHO);
	}

	/*
	 * No map supplied by the caller: this is an exec-style load, so we
	 * build a fresh map and later swap it in for the current task's map.
	 */
	if (new_map == VM_MAP_NULL) {
		create_map = TRUE;
		old_task = current_task();
	}

	/*
	 * If we are spawning, we have created backing objects for the process
	 * already, which include non-lazily creating the task map.  So we
	 * are going to switch out the task map with one appropriate for the
	 * bitness of the image being loaded.
	 */
	if (spawn) {
		create_map = TRUE;
		old_task = get_threadtask(thread);
	}

	if (create_map) {
		/* New pmap/map sized for the image's bitness (32- vs 64-bit). */
		pmap = pmap_create(get_task_ledger(task), (vm_map_size_t) 0,
				(imgp->ip_flags & IMGPF_IS_64BIT));
		map = vm_map_create(pmap,
				0,
				vm_compute_max_offset((imgp->ip_flags & IMGPF_IS_64BIT)),
				TRUE);

	} else
		map = new_map;

#ifndef	CONFIG_ENFORCE_SIGNED_CODE
	/* This turns off faulting for executable pages, which allows to 
	 * circumvent Code Signing Enforcement */
	if ( (header->flags & MH_ALLOW_STACK_EXECUTION) )
	        vm_map_disable_NX(map);
#endif

	/* Forcibly disallow execution from data pages on even if the arch
	 * normally permits it. */
	if ((header->flags & MH_NO_HEAP_EXECUTION) && !(imgp->ip_flags & IMGPF_ALLOW_DATA_EXEC))
		vm_map_disallow_data_exec(map);
	
	/*
	 * Compute a random offset for ASLR: 16 bits of entropy for 64-bit
	 * images, 8 bits for 32-bit, then shifted to a page boundary.
	 */
	if (!(imgp->ip_flags & IMGPF_DISABLE_ASLR)) {
		aslr_offset = random();
		aslr_offset %= 1 << ((imgp->ip_flags & IMGPF_IS_64BIT) ? 16 : 8);
		aslr_offset <<= PAGE_SHIFT;
	}
	
	/* Caller may not care about the load result; use a local then. */
	if (!result)
		result = &myresult;

	*result = load_result_null;

	lret = parse_machfile(vp, map, thread, header, file_offset, macho_size,
			      0, (int64_t)aslr_offset, result);

	if (lret != LOAD_SUCCESS) {
		if (create_map) {
			vm_map_deallocate(map);	/* will lose pmap reference too */
		}
		return(lret);
	}

#if CONFIG_EMBEDDED
	/*
	 * Check to see if the page zero is enforced by the map->min_offset.
	 */ 
	if (vm_map_has_hard_pagezero(map, 0x1000) == FALSE) {
		if (create_map) {
			vm_map_deallocate(map);	/* will lose pmap reference too */
		}
		printf("Cannot enforce a hard page-zero for %s\n", imgp->ip_strings);
		psignal(vfs_context_proc(imgp->ip_vfs_context), SIGKILL);
		return (LOAD_BADMACHO);
	}
#else
	/*
	 * For 64-bit users, check for presence of a 4GB page zero
	 * which will enable the kernel to share the user's address space
	 * and hence avoid TLB flushes on kernel entry/exit
	 */ 

	if ((imgp->ip_flags & IMGPF_IS_64BIT) &&
	     vm_map_has_4GB_pagezero(map)) {
		vm_map_set_4GB_pagezero(map);
	}
#endif
	/*
	 *	Commit to new map.
	 *
	 *	Swap the new map for the old, which  consumes our new map
	 *	reference but each leaves us responsible for the old_map reference.
	 *	That lets us get off the pmap associated with it, and
	 *	then we can release it.
	 */

	 if (create_map) {
		/*
		 * If this is an exec, then we are going to destroy the old
		 * task, and it's correct to halt it; if it's spawn, the
		 * task is not yet running, and it makes no sense.
		 */
	 	if (!spawn) {
			/*
			 * Mark the task as halting and start the other
			 * threads towards terminating themselves.  Then
			 * make sure any threads waiting for a process
			 * transition get informed that we are committed to
			 * this transition, and then finally complete the
			 * task halting (wait for threads and then cleanup
			 * task resources).
			 *
			 * NOTE: task_start_halt() makes sure that no new
			 * threads are created in the task during the transition.
			 * We need to mark the workqueue as exiting before we
			 * wait for threads to terminate (at the end of which
			 * we no longer have a prohibition on thread creation).
			 * 
			 * Finally, clean up any lingering workqueue data structures
			 * that may have been left behind by the workqueue threads
			 * as they exited (and then clean up the work queue itself).
			 */
			kret = task_start_halt(task);
			if (kret != KERN_SUCCESS) {
				/*
				 * Fix: release the map we created above, as
				 * every other error path does, instead of
				 * leaking it; and translate the Mach error
				 * into the load_return_t namespace.
				 */
				vm_map_deallocate(map);	/* will lose pmap reference too */
				return (LOAD_FAILURE);
			}
			proc_transcommit(p, 0);
			workqueue_mark_exiting(p);
			task_complete_halt(task);
			workqueue_exit(p);
		}
		old_map = swap_task_map(old_task, thread, map, !spawn);
		vm_map_clear_4GB_pagezero(old_map);
		vm_map_deallocate(old_map);
	}
	return(LOAD_SUCCESS);
}
/*
 * load_machfile (older variant):
 * Parse the Mach-O image described by imgp/header into a VM map and, when
 * we created that map ourselves, commit it as the current task's map.
 *
 * imgp    - exec image parameters (vnode, arch offset/size, flags).
 * header  - Mach header of the slice being loaded.
 * thread  - thread performing the load, passed through to parse_machfile().
 * new_map - target map, or VM_MAP_NULL to have us create (and swap in) one.
 * result  - optional out-parameter for load results; may be NULL.
 *
 * Returns LOAD_SUCCESS or a LOAD_* error from parse_machfile().
 */
load_return_t
load_machfile(
    struct image_params	*imgp,
    struct mach_header	*header,
    thread_t 		thread,
    vm_map_t 		new_map,
    load_result_t		*result
)
{
    struct vnode		*vp = imgp->ip_vp;
    off_t			file_offset = imgp->ip_arch_offset;
    off_t			macho_size = imgp->ip_arch_size;

    pmap_t			pmap = 0;	/* protected by create_map */
    vm_map_t		map;
    vm_map_t		old_map;
    load_result_t		myresult;
    load_return_t		lret;
    boolean_t create_map = TRUE;

    /* Caller supplied a map: load into it rather than creating our own. */
    if (new_map != VM_MAP_NULL) {
        create_map = FALSE;
    }

    if (create_map) {
        /*
         * Build a fresh pmap/map sized for the image's bitness; old_map
         * is remembered so it can be released after the swap below.
         */
        old_map = current_map();
        pmap = pmap_create((vm_map_size_t) 0, (imgp->ip_flags & IMGPF_IS_64BIT));
        map = vm_map_create(pmap,
                            0,
                            vm_compute_max_offset((imgp->ip_flags & IMGPF_IS_64BIT)),
                            TRUE);
    } else
        map = new_map;

    /*
     * NOTE(review): unlike the newer variant of this function, this
     * NX-disable is NOT guarded by CONFIG_ENFORCE_SIGNED_CODE — an image
     * flagged MH_ALLOW_STACK_EXECUTION always gets executable data pages
     * here.  Confirm this is intended for this kernel configuration.
     */
    if ( (header->flags & MH_ALLOW_STACK_EXECUTION) )
        vm_map_disable_NX(map);

    /* Caller may not care about the load result; use a local then. */
    if (!result)
        result = &myresult;

    *result = load_result_null;

    lret = parse_machfile(vp, map, thread, header, file_offset, macho_size,
                          0, result);

    if (lret != LOAD_SUCCESS) {
        if (create_map) {
            vm_map_deallocate(map);	/* will lose pmap reference too */
        }
        return(lret);
    }

    /*
     * For 64-bit users, check for presence of a 4GB page zero
     * which will enable the kernel to share the user's address space
     * and hence avoid TLB flushes on kernel entry/exit
     */
    if ((imgp->ip_flags & IMGPF_IS_64BIT) &&
            vm_map_has_4GB_pagezero(map))
        vm_map_set_4GB_pagezero(map);

    /*
     *	Commit to new map.  First make sure that the current
     *	users of the task get done with it, and that we clean
     *	up the old contents of IPC and memory.  The task is
     *	guaranteed to be single threaded upon return (us).
     *
     *	Swap the new map for the old, which  consumes our new map
     *	reference but each leaves us responsible for the old_map reference.
     *	That lets us get off the pmap associated with it, and
     *	then we can release it.
     */

    if (create_map) {
        /* Halt all other threads; order relative to the swap matters. */
        task_halt(current_task());

        old_map = swap_task_map(current_task(), map);
        vm_map_clear_4GB_pagezero(old_map);
        pmap_switch(pmap);	/* Make sure we are using the new pmap */
        vm_map_deallocate(old_map);
    }
    return(LOAD_SUCCESS);
}