Example 1
/* Not called from probe context */
int
uread(proc_t *p, void *buf, user_size_t len, user_addr_t a)
{
	kern_return_t ret;

	ASSERT(p != PROC_NULL);
	ASSERT(p->task != NULL);

	task_t task = p->task;

	/*
	 * Grab a reference to the task vm_map_t to make sure
	 * the map isn't pulled out from under us.
	 *
	 * Because the proc_lock is not held at all times on all code
	 * paths leading here, it is possible for the proc to have
	 * exited. If the map is null, fail.
	 */
	vm_map_t map = get_task_map_reference(task);
	if (map) {
		ret = vm_map_read_user( map, (vm_map_address_t)a, buf, (vm_size_t)len);
		vm_map_deallocate(map);
	} else
		ret = KERN_TERMINATED;
	
	return (int)ret;
}
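The comment in uread() states the rule every example in this collection illustrates: take a reference on the task's map before using it, and drop that reference on every exit path. A minimal sketch of the same take/use/release pattern, assuming a hypothetical read_task_memory() helper (get_task_map_reference, vm_map_read_user, and vm_map_deallocate are the real XNU calls; the helper name is illustrative):

static int
read_task_memory(task_t task, user_addr_t src, void *dst, user_size_t len)
{
	kern_return_t kr;
	vm_map_t map = get_task_map_reference(task);	/* +1 map ref, or NULL */

	if (map == NULL)
		return (int)KERN_TERMINATED;	/* task already exited */

	kr = vm_map_read_user(map, (vm_map_address_t)src, dst, (vm_size_t)len);
	vm_map_deallocate(map);			/* drop the reference we took */
	return (int)kr;
}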
Example 2
/* Routine vm_cache_statistics */
mig_internal void _Xvm_cache_statistics
	(mach_msg_header_t *InHeadP, mach_msg_header_t *OutHeadP)
{
	typedef struct {
		mach_msg_header_t Head;
	} Request;

	typedef struct {
		mach_msg_header_t Head;
		mach_msg_type_t RetCodeType;
		kern_return_t RetCode;
		mach_msg_type_t vm_cache_statsType;
		vm_cache_statistics_data_t vm_cache_stats;
	} Reply;

	Request *In0P = (Request *) InHeadP;
	Reply *OutP = (Reply *) OutHeadP;
	mig_external kern_return_t vm_cache_statistics
		(vm_map_t target_task, vm_cache_statistics_data_t *vm_cache_stats);

	const mach_msg_type_t vm_cache_statsType = {
		/* msgt_name = */		2,
		/* msgt_size = */		32,
		/* msgt_number = */		11,
		/* msgt_inline = */		TRUE,
		/* msgt_longform = */		FALSE,
		/* msgt_deallocate = */		FALSE,
		/* msgt_unused = */		0
	};

	vm_map_t target_task;

#if	TypeCheck
	if ((In0P->Head.msgh_size != 24) ||
	    (In0P->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX))
		{ OutP->RetCode = MIG_BAD_ARGUMENTS; return; }
#endif	/* TypeCheck */

	target_task = convert_port_to_map((ipc_port_t) In0P->Head.msgh_request_port);

	OutP->RetCode = vm_cache_statistics(target_task, &OutP->vm_cache_stats);
	vm_map_deallocate(target_task);
	if (OutP->RetCode != KERN_SUCCESS)
		return;

	OutP->Head.msgh_size = 80;

	OutP->vm_cache_statsType = vm_cache_statsType;
}
Example 3
/*
 * free:
 *
 * Free resources
 */
void IOBufferMemoryDescriptor::free()
{
    // Cache all of the relevant information on the stack for use
    // after we call super::free()!
    IOOptionBits options   = _options;
    vm_size_t    size	   = _capacity;
    void *       buffer	   = _buffer;
    vm_map_t	 map	   = 0;
    vm_offset_t  alignment = _alignment;

    if (reserved)
    {
        map = reserved->map;
        IODelete( reserved, ExpansionData, 1 );
    }

    /* super::free may unwire - deallocate buffer afterwards */
    super::free();

    if (buffer)
    {
        if (options & kIOMemoryPageable)
        {
            if (map)
                vm_deallocate(map, (vm_address_t) buffer, round_page_32(size));
            else
                IOFreePageable(buffer, size);
        }
        else
        {
            if (options & kIOMemoryPhysicallyContiguous)
                IOFreeContiguous(buffer, size);
            else if (alignment > 1)
                IOFreeAligned(buffer, size);
            else
                IOFree(buffer, size);
        }
    }
    if (map)
        vm_map_deallocate(map);
}
Example 4
load_return_t
load_machfile(
	struct image_params	*imgp,
	struct mach_header	*header,
	thread_t 		thread,
	vm_map_t 		new_map,
	load_result_t		*result
)
{
	struct vnode		*vp = imgp->ip_vp;
	off_t			file_offset = imgp->ip_arch_offset;
	off_t			macho_size = imgp->ip_arch_size;
	off_t			file_size = imgp->ip_vattr->va_data_size;
	
	pmap_t			pmap = 0;	/* protected by create_map */
	vm_map_t		map;
	vm_map_t		old_map;
	task_t			old_task = TASK_NULL; /* protected by create_map */
	load_result_t		myresult;
	load_return_t		lret;
	boolean_t create_map = FALSE;
	int spawn = (imgp->ip_flags & IMGPF_SPAWN);
	task_t task = current_task();
	proc_t p = current_proc();
	mach_vm_offset_t	aslr_offset = 0;
	kern_return_t 		kret;

	if (macho_size > file_size) {
		return(LOAD_BADMACHO);
	}

	if (new_map == VM_MAP_NULL) {
		create_map = TRUE;
		old_task = current_task();
	}

	/*
	 * If we are spawning, we have created backing objects for the process
	 * already, which include non-lazily creating the task map.  So we
	 * are going to switch out the task map with one appropriate for the
	 * bitness of the image being loaded.
	 */
	if (spawn) {
		create_map = TRUE;
		old_task = get_threadtask(thread);
	}

	if (create_map) {
		pmap = pmap_create(get_task_ledger(task), (vm_map_size_t) 0,
				(imgp->ip_flags & IMGPF_IS_64BIT));
		map = vm_map_create(pmap,
				0,
				vm_compute_max_offset((imgp->ip_flags & IMGPF_IS_64BIT)),
				TRUE);

	} else
		map = new_map;

#ifndef	CONFIG_ENFORCE_SIGNED_CODE
	/* This turns off faulting for executable pages, which makes it
	 * possible to circumvent Code Signing Enforcement */
	if ( (header->flags & MH_ALLOW_STACK_EXECUTION) )
	        vm_map_disable_NX(map);
#endif

	/* Forcibly disallow execution from data pages even if the
	 * architecture normally permits it. */
	if ((header->flags & MH_NO_HEAP_EXECUTION) && !(imgp->ip_flags & IMGPF_ALLOW_DATA_EXEC))
		vm_map_disallow_data_exec(map);
	
	/*
	 * Compute a random offset for ASLR.
	 */
	if (!(imgp->ip_flags & IMGPF_DISABLE_ASLR)) {
		aslr_offset = random();
		aslr_offset %= 1 << ((imgp->ip_flags & IMGPF_IS_64BIT) ? 16 : 8);
		aslr_offset <<= PAGE_SHIFT;
	}
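	/*
	 * Worked example (assuming 4 KiB pages, i.e. PAGE_SHIFT == 12):
	 * the modulus above keeps 16 bits of entropy for 64-bit images
	 * and 8 bits for 32-bit ones, so after the shift the slide is a
	 * page-aligned offset below 256 MiB or 1 MiB respectively.
	 */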
	
	if (!result)
		result = &myresult;

	*result = load_result_null;

	lret = parse_machfile(vp, map, thread, header, file_offset, macho_size,
			      0, (int64_t)aslr_offset, result);

	if (lret != LOAD_SUCCESS) {
		if (create_map) {
			vm_map_deallocate(map);	/* will lose pmap reference too */
		}
		return(lret);
	}

#if CONFIG_EMBEDDED
	/*
	 * Check to see if the page zero is enforced by the map->min_offset.
	 */ 
	if (vm_map_has_hard_pagezero(map, 0x1000) == FALSE) {
		if (create_map) {
			vm_map_deallocate(map);	/* will lose pmap reference too */
		}
		printf("Cannot enforce a hard page-zero for %s\n", imgp->ip_strings);
		psignal(vfs_context_proc(imgp->ip_vfs_context), SIGKILL);
		return (LOAD_BADMACHO);
	}
#else
	/*
	 * For 64-bit users, check for presence of a 4GB page zero
	 * which will enable the kernel to share the user's address space
	 * and hence avoid TLB flushes on kernel entry/exit
	 */ 

	if ((imgp->ip_flags & IMGPF_IS_64BIT) &&
	     vm_map_has_4GB_pagezero(map)) {
		vm_map_set_4GB_pagezero(map);
	}
#endif
	/*
	 *	Commit to new map.
	 *
	 *	Swap the new map for the old, which consumes our new map
	 *	reference and leaves us responsible for the old_map reference.
	 *	That lets us get off the pmap associated with it, and
	 *	then we can release it.
	 */

	 if (create_map) {
		/*
		 * If this is an exec, then we are going to destroy the old
		 * task, and it's correct to halt it; if it's spawn, the
		 * task is not yet running, and it makes no sense.
		 */
	 	if (!spawn) {
			/*
			 * Mark the task as halting and start the other
			 * threads towards terminating themselves.  Then
			 * make sure any threads waiting for a process
			 * transition get informed that we are committed to
			 * this transition, and then finally complete the
			 * task halting (wait for threads and then cleanup
			 * task resources).
			 *
			 * NOTE: task_start_halt() makes sure that no new
			 * threads are created in the task during the transition.
			 * We need to mark the workqueue as exiting before we
			 * wait for threads to terminate (at the end of which
			 * we no longer have a prohibition on thread creation).
			 * 
			 * Finally, clean up any lingering workqueue data structures
			 * that may have been left behind by the workqueue threads
			 * as they exited (and then clean up the work queue itself).
			 */
			kret = task_start_halt(task);
			if (kret != KERN_SUCCESS) {
				return(kret);		
			}
			proc_transcommit(p, 0);
			workqueue_mark_exiting(p);
			task_complete_halt(task);
			workqueue_exit(p);
		}
		old_map = swap_task_map(old_task, thread, map, !spawn);
		vm_map_clear_4GB_pagezero(old_map);
		vm_map_deallocate(old_map);
	}
	return(LOAD_SUCCESS);
}
Example 5
int
fill_procregioninfo(task_t task, uint64_t arg, struct proc_regioninfo_internal *pinfo, uint32_t  *vnodeaddr, uint32_t  *vid)
{

	vm_map_t map;
	vm_map_offset_t	address = (vm_map_offset_t )arg;
	vm_map_entry_t		tmp_entry;
	vm_map_entry_t		entry;
	vm_map_offset_t		start;
	vm_region_extended_info_data_t extended;
	vm_region_top_info_data_t top;

	task_lock(task);
	map = task->map;
	if (map == VM_MAP_NULL) {
		task_unlock(task);
		return(0);
	}
	vm_map_reference(map);
	task_unlock(task);

	vm_map_lock_read(map);

	start = address;
	if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
		if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
			vm_map_unlock_read(map);
			vm_map_deallocate(map);
			return(0);
		}
	} else {
		entry = tmp_entry;
	}

	start = entry->vme_start;

	pinfo->pri_offset = entry->offset;
	pinfo->pri_protection = entry->protection;
	pinfo->pri_max_protection = entry->max_protection;
	pinfo->pri_inheritance = entry->inheritance;
	pinfo->pri_behavior = entry->behavior;
	pinfo->pri_user_wired_count = entry->user_wired_count;
	pinfo->pri_user_tag = entry->alias;

	if (entry->is_sub_map) {
		pinfo->pri_flags |= PROC_REGION_SUBMAP;
	} else {
		if (entry->is_shared)
			pinfo->pri_flags |= PROC_REGION_SHARED;
	}

	extended.protection = entry->protection;
	extended.user_tag = entry->alias;
	extended.pages_resident = 0;
	extended.pages_swapped_out = 0;
	extended.pages_shared_now_private = 0;
	extended.pages_dirtied = 0;
	extended.external_pager = 0;
	extended.shadow_depth = 0;

	vm_map_region_walk(map, start, entry, entry->offset, entry->vme_end - start, &extended);

	if (extended.external_pager && extended.ref_count == 2 && extended.share_mode == SM_SHARED)
		extended.share_mode = SM_PRIVATE;

	top.private_pages_resident = 0;
	top.shared_pages_resident = 0;
	vm_map_region_top_walk(entry, &top);

	pinfo->pri_pages_resident = extended.pages_resident;
	pinfo->pri_pages_shared_now_private = extended.pages_shared_now_private;
	pinfo->pri_pages_swapped_out = extended.pages_swapped_out;
	pinfo->pri_pages_dirtied = extended.pages_dirtied;
	pinfo->pri_ref_count = extended.ref_count;
	pinfo->pri_shadow_depth = extended.shadow_depth;
	pinfo->pri_share_mode = extended.share_mode;

	pinfo->pri_private_pages_resident = top.private_pages_resident;
	pinfo->pri_shared_pages_resident = top.shared_pages_resident;
	pinfo->pri_obj_id = top.obj_id;

	pinfo->pri_address = (uint64_t)start;
	pinfo->pri_size = (uint64_t)(entry->vme_end - start);
	pinfo->pri_depth = 0;

	if ((vnodeaddr != 0) && (entry->is_sub_map == 0)) {
		*vnodeaddr = (uint32_t)0;

		if (fill_vnodeinfoforaddr(entry, vnodeaddr, vid) == 0) {
			vm_map_unlock_read(map);
			vm_map_deallocate(map);
			return(1);
		}
	}

	vm_map_unlock_read(map);
	vm_map_deallocate(map);
	return(1);
}
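fill_procregioninfo() opens with the other recurring idiom in these examples: hold the task lock just long enough to stabilize task->map and take a map reference; the reference, not the lock, then protects the map. A minimal sketch of that idiom, with take_task_map() as an illustrative name (task_lock, vm_map_reference, and vm_map_deallocate are the real calls):

static vm_map_t
take_task_map(task_t task)
{
	vm_map_t map;

	task_lock(task);
	map = task->map;
	if (map != VM_MAP_NULL)
		vm_map_reference(map);	/* +1 ref while the pointer is stable */
	task_unlock(task);

	return map;	/* caller must vm_map_deallocate() a non-NULL result */
}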
Example 6
/* Routine vm_allocate_contiguous */
mig_internal void _Xvm_allocate_contiguous
	(mach_msg_header_t *InHeadP, mach_msg_header_t *OutHeadP)
{
	typedef struct {
		mach_msg_header_t Head;
		mach_msg_type_t target_taskType;
		ipc_port_t target_task;
		mach_msg_type_t sizeType;
		vm_size_t size;
	} Request;

	typedef struct {
		mach_msg_header_t Head;
		mach_msg_type_t RetCodeType;
		kern_return_t RetCode;
		mach_msg_type_t vaddrType;
		vm_address_t vaddr;
		mach_msg_type_t paddrType;
		vm_address_t paddr;
	} Reply;

	Request *In0P = (Request *) InHeadP;
	Reply *OutP = (Reply *) OutHeadP;
	mig_external kern_return_t experimental_vm_allocate_contiguous
		(host_t host_priv, vm_map_t target_task, vm_address_t *vaddr, vm_address_t *paddr, vm_size_t size);

	const mach_msg_type_t sizeCheck = {
		/* msgt_name = */		2,
		/* msgt_size = */		32,
		/* msgt_number = */		1,
		/* msgt_inline = */		TRUE,
		/* msgt_longform = */		FALSE,
		/* msgt_deallocate = */		FALSE,
		/* msgt_unused = */		0
	};

	const mach_msg_type_t vaddrType = {
		/* msgt_name = */		2,
		/* msgt_size = */		32,
		/* msgt_number = */		1,
		/* msgt_inline = */		TRUE,
		/* msgt_longform = */		FALSE,
		/* msgt_deallocate = */		FALSE,
		/* msgt_unused = */		0
	};

	const mach_msg_type_t paddrType = {
		/* msgt_name = */		2,
		/* msgt_size = */		32,
		/* msgt_number = */		1,
		/* msgt_inline = */		TRUE,
		/* msgt_longform = */		FALSE,
		/* msgt_deallocate = */		FALSE,
		/* msgt_unused = */		0
	};

	vm_map_t target_task;

#if	TypeCheck
	if ((In0P->Head.msgh_size != 40) ||
	    !(In0P->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX))
		{ OutP->RetCode = MIG_BAD_ARGUMENTS; return; }
#endif	/* TypeCheck */

#if	TypeCheck
	if ((In0P->target_taskType.msgt_inline != TRUE) ||
	    (In0P->target_taskType.msgt_longform != FALSE) ||
	    (In0P->target_taskType.msgt_name != 17) ||
	    (In0P->target_taskType.msgt_number != 1) ||
	    (In0P->target_taskType.msgt_size != 32))
		{ OutP->RetCode = MIG_BAD_ARGUMENTS; return; }
#endif	/* TypeCheck */

#if	TypeCheck
	if (BAD_TYPECHECK(&In0P->sizeType, &sizeCheck))
		{ OutP->RetCode = MIG_BAD_ARGUMENTS; return; }
#endif	/* TypeCheck */

	target_task = convert_port_to_map(In0P->target_task);

	OutP->RetCode = experimental_vm_allocate_contiguous(convert_port_to_host_priv((ipc_port_t) In0P->Head.msgh_request_port), target_task, &OutP->vaddr, &OutP->paddr, In0P->size);
	vm_map_deallocate(target_task);
	if (OutP->RetCode != KERN_SUCCESS)
		return;

	if (IP_VALID(In0P->target_task))
		ipc_port_release_send(In0P->target_task);

	OutP->Head.msgh_size = 48;

	OutP->vaddrType = vaddrType;

	OutP->paddrType = paddrType;
}
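Both MIG stubs above follow the same ownership rule: convert_port_to_map() hands back a referenced map, and the stub drops that reference exactly once after the server routine returns, whatever RetCode says. A stripped-down sketch of that shape, with a hypothetical server_routine(); note that the stubs deallocate unconditionally, which assumes vm_map_deallocate() tolerates the VM_MAP_NULL an invalid port converts to:

static kern_return_t
dispatch_on_map(ipc_port_t request_port)
{
	vm_map_t map = convert_port_to_map(request_port);	/* referenced map, or VM_MAP_NULL */
	kern_return_t kr;

	kr = server_routine(map);	/* hypothetical server-side body */
	vm_map_deallocate(map);		/* dropped exactly once, success or not */
	return kr;
}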
Example 7
/* Not called from probe context */
int
uwrite(proc_t *p, void *buf, user_size_t len, user_addr_t a)
{
	kern_return_t ret;

	ASSERT(p != NULL);
	ASSERT(p->task != NULL);

	task_t task = p->task;

	/*
	 * Grab a reference to the task vm_map_t to make sure
	 * the map isn't pulled out from under us.
	 *
	 * Because the proc_lock is not held at all times on all code
	 * paths leading here, it is possible for the proc to have
	 * exited. If the map is null, fail.
	 */
	vm_map_t map = get_task_map_reference(task);
	if (map) {
		/* Find the memory permissions. */
		uint32_t nestingDepth = 999999;
		vm_region_submap_short_info_data_64_t info;
		mach_msg_type_number_t count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
		mach_vm_address_t address = (mach_vm_address_t)a;
		mach_vm_size_t sizeOfRegion = (mach_vm_size_t)len;
	
		ret = mach_vm_region_recurse(map, &address, &sizeOfRegion, &nestingDepth, (vm_region_recurse_info_t)&info, &count);
		if (ret != KERN_SUCCESS)
			goto done;

		vm_prot_t reprotect;

		if (!(info.protection & VM_PROT_WRITE)) {
			/* Save the original protection values for restoration later */
			reprotect = info.protection;

			if (info.max_protection & VM_PROT_WRITE) {
				/* The memory is not currently writable, but can be made writable. */
				ret = mach_vm_protect (map, (mach_vm_offset_t)a, (mach_vm_size_t)len, 0, (reprotect & ~VM_PROT_EXECUTE) | VM_PROT_WRITE);
			} else {
				/*
				 * The memory is not currently writable, and cannot be made writable. We need to COW this memory.
				 *
				 * Oddly, we can't just say "reprotect | VM_PROT_COPY"; that fails.
				 */
				ret = mach_vm_protect (map, (mach_vm_offset_t)a, (mach_vm_size_t)len, 0, VM_PROT_COPY | VM_PROT_READ | VM_PROT_WRITE);
			}

			if (ret != KERN_SUCCESS)
				goto done;

		} else {
			/* The memory was already writable. */
			reprotect = VM_PROT_NONE;
		}

		ret = vm_map_write_user( map,
					 buf,
					 (vm_map_address_t)a,
					 (vm_size_t)len);

		dtrace_flush_caches();

		if (ret != KERN_SUCCESS)
			goto done;

		if (reprotect != VM_PROT_NONE) {
			ASSERT(reprotect & VM_PROT_EXECUTE);
			ret = mach_vm_protect (map, (mach_vm_offset_t)a, (mach_vm_size_t)len, 0, reprotect);
		}

done:
		vm_map_deallocate(map);
	} else 
		ret = KERN_TERMINATED;

	return (int)ret;
}
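The protection dance in uwrite() reduces to three cases: already writable, writable once execute permission is dropped, and copy-on-write as a last resort. A minimal sketch, assuming the caller already holds the map reference; make_writable() and its parameters are illustrative, mach_vm_protect() is the real call:

static kern_return_t
make_writable(vm_map_t map, mach_vm_offset_t addr, mach_vm_size_t len,
    vm_prot_t cur, vm_prot_t max)
{
	if (cur & VM_PROT_WRITE)
		return KERN_SUCCESS;			/* case 1: already writable */

	if (max & VM_PROT_WRITE)			/* case 2: writable once X is dropped */
		return mach_vm_protect(map, addr, len, FALSE,
		    (cur & ~VM_PROT_EXECUTE) | VM_PROT_WRITE);

	/* case 3: force a private, writable (COW) copy of the pages */
	return mach_vm_protect(map, addr, len, FALSE,
	    VM_PROT_COPY | VM_PROT_READ | VM_PROT_WRITE);
}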
Example 8
load_return_t
load_machfile(
    struct image_params	*imgp,
    struct mach_header	*header,
    thread_t 		thread,
    vm_map_t 		new_map,
    load_result_t		*result
)
{
    struct vnode		*vp = imgp->ip_vp;
    off_t			file_offset = imgp->ip_arch_offset;
    off_t			macho_size = imgp->ip_arch_size;

    pmap_t			pmap = 0;	/* protected by create_map */
    vm_map_t		map;
    vm_map_t		old_map;
    load_result_t		myresult;
    load_return_t		lret;
    boolean_t create_map = TRUE;

    if (new_map != VM_MAP_NULL) {
        create_map = FALSE;
    }

    if (create_map) {
        old_map = current_map();
        pmap = pmap_create((vm_map_size_t) 0, (imgp->ip_flags & IMGPF_IS_64BIT));
        map = vm_map_create(pmap,
                            0,
                            vm_compute_max_offset((imgp->ip_flags & IMGPF_IS_64BIT)),
                            TRUE);
    } else
        map = new_map;

    if ( (header->flags & MH_ALLOW_STACK_EXECUTION) )
        vm_map_disable_NX(map);

    if (!result)
        result = &myresult;

    *result = load_result_null;

    lret = parse_machfile(vp, map, thread, header, file_offset, macho_size,
                          0, result);

    if (lret != LOAD_SUCCESS) {
        if (create_map) {
            vm_map_deallocate(map);	/* will lose pmap reference too */
        }
        return(lret);
    }

    /*
     * For 64-bit users, check for presence of a 4GB page zero
     * which will enable the kernel to share the user's address space
     * and hence avoid TLB flushes on kernel entry/exit
     */
    if ((imgp->ip_flags & IMGPF_IS_64BIT) &&
            vm_map_has_4GB_pagezero(map))
        vm_map_set_4GB_pagezero(map);

    /*
     *	Commit to new map.  First make sure that the current
     *	users of the task get done with it, and that we clean
     *	up the old contents of IPC and memory.  The task is
     *	guaranteed to be single threaded upon return (us).
     *
     *	Swap the new map for the old, which consumes our new map
     *	reference and leaves us responsible for the old_map reference.
     *	That lets us get off the pmap associated with it, and
     *	then we can release it.
     */

    if (create_map) {
        task_halt(current_task());

        old_map = swap_task_map(current_task(), map);
        vm_map_clear_4GB_pagezero(old_map);
        pmap_switch(pmap);	/* Make sure we are using the new pmap */
        vm_map_deallocate(old_map);
    }
    return(LOAD_SUCCESS);
}
Example 9
static
load_return_t
load_dylinker(
    struct dylinker_command	*lcp,
    integer_t		archbits,
    vm_map_t		map,
    thread_t	thread,
    int			depth,
    load_result_t		*result,
    boolean_t		is_64bit
)
{
    char			*name;
    char			*p;
    struct vnode		*vp = NULLVP;	/* set by get_macho_vnode() */
    struct mach_header	header;
    off_t			file_offset = 0; /* set by get_macho_vnode() */
    off_t			macho_size = 0;	/* set by get_macho_vnode() */
    vm_map_t		copy_map;
    load_result_t		myresult;
    kern_return_t		ret;
    vm_map_copy_t	tmp;
    mach_vm_offset_t	dyl_start, map_addr;
    mach_vm_size_t		dyl_length;

    name = (char *)lcp + lcp->name.offset;
    /*
     *	Check for a properly null-terminated string.
     */
    p = name;
    do {
        if (p >= (char *)lcp + lcp->cmdsize)
            return(LOAD_BADMACHO);
    } while (*p++);
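    /*
     * The loop above rejects a name whose terminating NUL would fall
     * outside the load command, i.e. past lcp + lcp->cmdsize.
     */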

    ret = get_macho_vnode(name, archbits, &header, &file_offset, &macho_size, &vp);
    if (ret)
        return (ret);

    myresult = load_result_null;

    /*
     *	First try to map dyld in directly.  This should work most of
     *	the time since there shouldn't normally be something already
     *	mapped to its address.
     */

    ret = parse_machfile(vp, map, thread, &header, file_offset, macho_size,
                         depth, &myresult);

    /*
     *	If it turned out something was in the way, then we'll take
     *	this longer path to map dyld into a temporary map and copy
     *	it into the destination map at a different address.
     */

    if (ret == LOAD_NOSPACE) {

        /*
         *	Load the Mach-O.
         *	Use a temporary map to do the work.
         */
        copy_map = vm_map_create(pmap_create(vm_map_round_page(macho_size),
                                             is_64bit),
                                 get_map_min(map), get_map_max(map), TRUE);
        if (VM_MAP_NULL == copy_map) {
            ret = LOAD_RESOURCE;
            goto out;
        }

        myresult = load_result_null;

        ret = parse_machfile(vp, copy_map, thread, &header,
                             file_offset, macho_size,
                             depth, &myresult);

        if (ret) {
            vm_map_deallocate(copy_map);
            goto out;
        }

        if (get_map_nentries(copy_map) > 0) {

            dyl_start = mach_get_vm_start(copy_map);
            dyl_length = mach_get_vm_end(copy_map) - dyl_start;

            map_addr = dyl_start;
            ret = mach_vm_allocate(map, &map_addr, dyl_length, VM_FLAGS_ANYWHERE);

            if (ret != KERN_SUCCESS) {
                vm_map_deallocate(copy_map);
                ret = LOAD_NOSPACE;
                goto out;

            }

            ret = vm_map_copyin(copy_map,
                                (vm_map_address_t)dyl_start,
                                (vm_map_size_t)dyl_length,
                                TRUE, &tmp);
            if (ret != KERN_SUCCESS) {
                (void) vm_map_remove(map,
                                     vm_map_trunc_page(map_addr),
                                     vm_map_round_page(map_addr + dyl_length),
                                     VM_MAP_NO_FLAGS);
                vm_map_deallocate(copy_map);
                goto out;
            }

            ret = vm_map_copy_overwrite(map,
                                        (vm_map_address_t)map_addr,
                                        tmp, FALSE);
            if (ret != KERN_SUCCESS) {
                vm_map_copy_discard(tmp);
                (void) vm_map_remove(map,
                                     vm_map_trunc_page(map_addr),
                                     vm_map_round_page(map_addr + dyl_length),
                                     VM_MAP_NO_FLAGS);
                vm_map_deallocate(copy_map);
                goto out;
            }
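            /*
             * On success vm_map_copy_overwrite() consumed tmp; it is
             * only ours to discard on the failure path above.
             */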

            /* dyld was relocated, so slide its recorded entry point by the same delta */
            if (map_addr != dyl_start)
                myresult.entry_point += (map_addr - dyl_start);
        } else {
            ret = LOAD_FAILURE;
        }

        vm_map_deallocate(copy_map);
    }

    if (ret == LOAD_SUCCESS) {
        result->dynlinker = TRUE;
        result->entry_point = myresult.entry_point;
        (void)ubc_map(vp, PROT_READ | PROT_EXEC);
    }
out:
    vnode_put(vp);
    return (ret);

}