/**
 * Initialize a new memory object reference, mapping @a task_addr from @a task into the current process. The mapping
 * will be copy-on-write, and will be checked to ensure a minimum protection value of VM_PROT_READ.
 *
 * @param mobj Memory object to be initialized.
 * @param task The task from which the memory will be mapped.
 * @param task_addr The task-relative address of the memory to be mapped. This is not required to fall on a page boundary.
 * @param length The total size of the mapping to create.
 * @param require_full If false, short mappings will be permitted in the case where a memory object of the requested length
 * does not exist at the target address. It is the caller's responsibility to validate the resulting length of the
 * mapping, e.g., using plcrash_async_mobject_remap_address() and similar. If true, and the entire requested page range is
 * not valid, the mapping request will fail.
 *
 * @return On success, returns PLCRASH_ESUCCESS. On failure, one of the plcrash_error_t error values will be returned, and no
 * mapping will be performed.
 */
plcrash_error_t plcrash_async_mobject_init (plcrash_async_mobject_t *mobj, mach_port_t task, pl_vm_address_t task_addr, pl_vm_size_t length, bool require_full) {
    plcrash_error_t err;

    /* Perform the page mapping */
    err = plcrash_async_mobject_remap_pages_workaround(task, task_addr, length, require_full, &mobj->vm_address, &mobj->vm_length);
    if (err != PLCRASH_ESUCCESS)
        return err;

    /* Determine the offset and length of the actual data */
    mobj->address = mobj->vm_address + (task_addr - mach_vm_trunc_page(task_addr));
    mobj->length = mobj->vm_length - (mobj->address - mobj->vm_address);

    /* Ensure that the length is capped to the user's requested length, rather than the total length once rounded up
     * to a full page. The length might already be smaller than the requested length if require_full is false. */
    if (mobj->length > length)
        mobj->length = length;

    /* Determine the slide between the target and local mappings. Note that this must be computed from either two
     * page-aligned addresses, or two addresses sharing the same page offset; mixing task_addr with the page-aligned
     * vm_address would yield an incorrect offset. */
    mobj->vm_slide = task_addr - mobj->address;
    
    /* Save the task-relative address */
    mobj->task_address = task_addr;
    
    /* Save the task reference */
    mobj->task = task;
    mach_port_mod_refs(mach_task_self(), mobj->task, MACH_PORT_RIGHT_SEND, 1);

    return PLCRASH_ESUCCESS;
}
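
/*
 * Illustrative usage sketch (not part of the original source): how a caller might use
 * plcrash_async_mobject_init() to read a value out of another task. The four-argument
 * (mobj, address, offset, length) form of plcrash_async_mobject_remap_address(), the
 * plcrash_async_mobject_free() tear-down routine, and the demo_read_remote_uint32() helper
 * name are assumptions made for this sketch.
 */
static plcrash_error_t demo_read_remote_uint32 (mach_port_t task, pl_vm_address_t task_addr, uint32_t *value) {
    plcrash_async_mobject_t mobj;
    plcrash_error_t err;

    /* Map exactly sizeof(uint32_t) bytes, requiring the full range to be valid. */
    err = plcrash_async_mobject_init(&mobj, task, task_addr, sizeof(uint32_t), true);
    if (err != PLCRASH_ESUCCESS)
        return err;

    /* Translate the task-relative address into the local mapping and read the value
     * (assumed remap_address signature; see the lead-in comment above). */
    uint32_t *local = (uint32_t *) plcrash_async_mobject_remap_address(&mobj, task_addr, 0, sizeof(uint32_t));
    if (local == NULL) {
        plcrash_async_mobject_free(&mobj);
        return PLCRASH_EINVAL;
    }
    *value = *local;

    plcrash_async_mobject_free(&mobj);
    return PLCRASH_ESUCCESS;
}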
Example #2
int
mincore(__unused proc_t p, struct mincore_args *uap, __unused register_t *retval)
{
	mach_vm_offset_t addr, first_addr, end;
	vm_map_t map;
	user_addr_t vec;
	int error;
	int vecindex, lastvecindex;
	int mincoreinfo=0;
	int pqueryinfo;
	kern_return_t	ret;
	int numref;

	char c;

	map = current_map();

	/*
	 * Make sure that the addresses presented are valid for user
	 * mode.
	 */
	first_addr = addr = mach_vm_trunc_page(uap->addr);
	end = addr + mach_vm_round_page(uap->len);

	if (end < addr)
		return (EINVAL);

	/*
	 * Address of byte vector
	 */
	vec = uap->vec;

	/*
	 * Do this on a map entry basis so that if the pages are not
	 * in the current process's address space, we can easily look
	 * up the pages elsewhere.
	 */
	lastvecindex = -1;
	for( ; addr < end; addr += PAGE_SIZE ) {
		pqueryinfo = 0;
		ret = mach_vm_page_query(map, addr, &pqueryinfo, &numref);
		if (ret != KERN_SUCCESS) 
			pqueryinfo = 0;
		mincoreinfo = 0;
		if (pqueryinfo & VM_PAGE_QUERY_PAGE_PRESENT)
			mincoreinfo |= MINCORE_INCORE;
		if (pqueryinfo & VM_PAGE_QUERY_PAGE_REF)
			mincoreinfo |= MINCORE_REFERENCED;
		if (pqueryinfo & VM_PAGE_QUERY_PAGE_DIRTY)
			mincoreinfo |= MINCORE_MODIFIED;

		/*
		 * calculate index into user supplied byte vector
		 */
		vecindex = (addr - first_addr) >> PAGE_SHIFT;

		/*
		 * If we have skipped map entries, we need to make sure that
		 * the byte vector is zeroed for those skipped entries.
		 */
		while((lastvecindex + 1) < vecindex) {
			c = 0;
			error = copyout(&c, vec + lastvecindex, 1);
			if (error) {
				return (EFAULT);
			}
			++lastvecindex;
		}

		/*
		 * Pass the page information to the user
		 */
		c = (char)mincoreinfo;
		error = copyout(&c, vec + vecindex, 1);
		if (error) {
			return (EFAULT);
		}
		lastvecindex = vecindex;
	}


	/*
	 * Zero the last entries in the byte vector.
	 */
	vecindex = (end - first_addr) >> PAGE_SHIFT;
	while((lastvecindex + 1) < vecindex) {
		c = 0;
		error = copyout(&c, vec + lastvecindex, 1);
		if (error) {
			return (EFAULT);
		}
		++lastvecindex;
	}
	
	return (0);
}
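
/*
 * Illustrative user-space counterpart (not part of the kernel source): exercising the mincore()
 * system call implemented above through its libc wrapper,
 * int mincore(const void *addr, size_t len, char *vec). The demo_mincore() helper name is
 * hypothetical; one status byte per page is written by the copyout() loops above.
 */
#include <sys/mman.h>
#include <stdio.h>
#include <unistd.h>

int
demo_mincore(void)
{
	size_t pagesize = (size_t)getpagesize();
	size_t len = 4 * pagesize;
	char vec[4];
	int i;

	/* Map four anonymous pages and touch the first so it is expected to be resident. */
	char *region = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0);
	if (region == MAP_FAILED)
		return (-1);
	region[0] = 1;

	if (mincore(region, len, vec) != 0) {
		(void)munmap(region, len);
		return (-1);
	}

	for (i = 0; i < 4; i++)
		printf("page %d: %s\n", i, (vec[i] & MINCORE_INCORE) ? "resident" : "not resident");

	(void)munmap(region, len);
	return (0);
}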
Example #3
/*
 * XXX Internally, we use VM_PROT_* somewhat interchangeably, but the correct
 * XXX usage is PROT_* from an interface perspective.  Thus the values of
 * XXX VM_PROT_* and PROT_* need to correspond.
 */
int
mmap(proc_t p, struct mmap_args *uap, user_addr_t *retval)
{
	/*
	 *	Map in special device (must be SHARED) or file
	 */
	struct fileproc *fp;
	register struct		vnode *vp;
	int			flags;
	int			prot, file_prot;
	int			err=0;
	vm_map_t		user_map;
	kern_return_t		result;
	mach_vm_offset_t	user_addr;
	mach_vm_size_t		user_size;
	vm_object_offset_t	pageoff;
	vm_object_offset_t	file_pos;
	int			alloc_flags=0;
	boolean_t		docow;
	vm_prot_t		maxprot;
	void 			*handle;
	vm_pager_t		pager;
	int 			mapanon=0;
	int 			fpref=0;
	int error =0;
	int fd = uap->fd;

	user_addr = (mach_vm_offset_t)uap->addr;
	user_size = (mach_vm_size_t) uap->len;

	AUDIT_ARG(addr, user_addr);
	AUDIT_ARG(len, user_size);
	AUDIT_ARG(fd, uap->fd);

	prot = (uap->prot & VM_PROT_ALL);
#if 3777787
	/*
	 * Since the hardware currently does not support writing without
	 * read-before-write, or execution-without-read, if the request is
	 * for write or execute access, we must imply read access as well;
	 * otherwise programs expecting this to work will fail to operate.
	 */
	if (prot & (VM_PROT_EXECUTE | VM_PROT_WRITE))
		prot |= VM_PROT_READ;
#endif	/* radar 3777787 */

	flags = uap->flags;
	vp = NULLVP;

	/*
	 * The VM code does not have prototypes, and the compiler doesn't do
	 * the right thing when you cast a 64-bit value and pass it in a
	 * function call. So here it is.
	 */
	file_pos = (vm_object_offset_t)uap->pos;


	/* make sure mapping fits into numeric range etc */
	if (file_pos + user_size > (vm_object_offset_t)-PAGE_SIZE_64)
		return (EINVAL);

	/*
	 * Align the file position to a page boundary,
	 * and save its page offset component.
	 */
	pageoff = (file_pos & PAGE_MASK);
	file_pos -= (vm_object_offset_t)pageoff;


	/* Adjust size for rounding (on both ends). */
	user_size += pageoff;			/* low end... */
	user_size = mach_vm_round_page(user_size);	/* hi end */


	/*
	 * Check for illegal addresses.  Watch out for address wrap... Note
	 * that VM_*_ADDRESS are not constants due to casts (argh).
	 */
	if (flags & MAP_FIXED) {
		/*
		 * The specified address must have the same remainder
		 * as the file offset taken modulo PAGE_SIZE, so it
		 * should be aligned after adjustment by pageoff.
		 */
		user_addr -= pageoff;
		if (user_addr & PAGE_MASK)
			return (EINVAL);
	}
#ifdef notyet
	/* Do not have APIs to get this info; need to wait until then. */
	/*
	 * XXX for non-fixed mappings where no hint is provided or
	 * the hint would fall in the potential heap space,
	 * place it after the end of the largest possible heap.
	 *
	 * There should really be a pmap call to determine a reasonable
	 * location.
	 */
	else if (addr < mach_vm_round_page(p->p_vmspace->vm_daddr + MAXDSIZ))
		addr = mach_vm_round_page(p->p_vmspace->vm_daddr + MAXDSIZ);

#endif

	alloc_flags = 0;

	if (flags & MAP_ANON) {
		/*
		 * Mapping blank space is trivial.  Use positive fds as the alias
		 * value for memory tracking. 
		 */
		if (fd != -1) {
			/*
			 * Use "fd" to pass (some) Mach VM allocation flags,
			 * (see the VM_FLAGS_* definitions).
			 */
			alloc_flags = fd & (VM_FLAGS_ALIAS_MASK |
					    VM_FLAGS_PURGABLE);
			if (alloc_flags != fd) {
				/* reject if there are any extra flags */
				return EINVAL;
			}
		}
			
		handle = NULL;
		maxprot = VM_PROT_ALL;
		file_pos = 0;
		mapanon = 1;
	} else {
		struct vnode_attr va;
		vfs_context_t ctx = vfs_context_current();

		/*
		 * Mapping file, get fp for validation. Obtain vnode and make
		 * sure it is of appropriate type.
		 */
		err = fp_lookup(p, fd, &fp, 0);
		if (err)
			return(err);
		fpref = 1;
		if(fp->f_fglob->fg_type == DTYPE_PSXSHM) {
			uap->addr = (user_addr_t)user_addr;
			uap->len = (user_size_t)user_size;
			uap->prot = prot;
			uap->flags = flags;
			uap->pos = file_pos;
			error = pshm_mmap(p, uap, retval, fp, (off_t)pageoff);
			goto bad;
		}

		if (fp->f_fglob->fg_type != DTYPE_VNODE) {
			error = EINVAL;
			goto bad;
		}
		vp = (struct vnode *)fp->f_fglob->fg_data;
		error = vnode_getwithref(vp);
		if(error != 0)
			goto bad;

		if (vp->v_type != VREG && vp->v_type != VCHR) {
			(void)vnode_put(vp);
			error = EINVAL;
			goto bad;
		}

		AUDIT_ARG(vnpath, vp, ARG_VNODE1);
		
		/*
		 * POSIX: mmap needs to update access time for mapped files
		 */
		if ((vnode_vfsvisflags(vp) & MNT_NOATIME) == 0) {
			VATTR_INIT(&va);
			nanotime(&va.va_access_time);
			VATTR_SET_ACTIVE(&va, va_access_time);
			vnode_setattr(vp, &va, ctx);
		}
		
		/*
		 * XXX hack to handle use of /dev/zero to map anon memory (ala
		 * SunOS).
		 */
		if (vp->v_type == VCHR || vp->v_type == VSTR) {
			(void)vnode_put(vp);
			error = ENODEV;
			goto bad;
		} else {
			/*
			 * Ensure that file and memory protections are
			 * compatible.  Note that we only worry about
			 * writability if mapping is shared; in this case,
			 * current and max prot are dictated by the open file.
			 * XXX use the vnode instead?  Problem is: what
			 * credentials do we use for determination? What if
			 * proc does a setuid?
			 */
			maxprot = VM_PROT_EXECUTE;	/* ??? */
			if (fp->f_fglob->fg_flag & FREAD)
				maxprot |= VM_PROT_READ;
			else if (prot & PROT_READ) {
				(void)vnode_put(vp);
				error = EACCES;
				goto bad;
			}
			/*
			 * If we are sharing potential changes (either via
			 * MAP_SHARED or via the implicit sharing of character
			 * device mappings), and we are trying to get write
			 * permission although we opened it without asking
			 * for it, bail out. 
			 */

			if ((flags & MAP_SHARED) != 0) {
				if ((fp->f_fglob->fg_flag & FWRITE) != 0) {
 					/*
 					 * check for write access
 					 *
 					 * Note that we already made this check when granting FWRITE
 					 * against the file, so it seems redundant here.
 					 */
 					error = vnode_authorize(vp, NULL, KAUTH_VNODE_CHECKIMMUTABLE, ctx);
 
 					/* if not granted for any reason, but we wanted it, bad */
 					if ((prot & PROT_WRITE) && (error != 0)) {
 						vnode_put(vp);
  						goto bad;
  					}
 
 					/* if writable, remember */
 					if (error == 0)
  						maxprot |= VM_PROT_WRITE;

				} else if ((prot & PROT_WRITE) != 0) {
					(void)vnode_put(vp);
					error = EACCES;
					goto bad;
				}
			} else
				maxprot |= VM_PROT_WRITE;

			handle = (void *)vp;
#if CONFIG_MACF
			error = mac_file_check_mmap(vfs_context_ucred(ctx),
			    fp->f_fglob, prot, flags, &maxprot);
			if (error) {
				(void)vnode_put(vp);
				goto bad;
			}
#endif /* MAC */
		}
	}

	if (user_size == 0)  {
		if (!mapanon)
			(void)vnode_put(vp);
		error = 0;
		goto bad;
	}

	/*
	 *	We bend a little - round the start and end addresses
	 *	to the nearest page boundary.
	 */
	user_size = mach_vm_round_page(user_size);

	if (file_pos & PAGE_MASK_64) {
		if (!mapanon)
			(void)vnode_put(vp);
		error = EINVAL;
		goto bad;
	}

	user_map = current_map();

	if ((flags & MAP_FIXED) == 0) {
		alloc_flags |= VM_FLAGS_ANYWHERE;
		user_addr = mach_vm_round_page(user_addr);
	} else {
		if (user_addr != mach_vm_trunc_page(user_addr)) {
		        if (!mapanon)
			        (void)vnode_put(vp);
			error = EINVAL;
			goto bad;
		}
		/*
		 * mmap(MAP_FIXED) will replace any existing mappings in the
		 * specified range, if the new mapping is successful.
		 * If we just deallocate the specified address range here,
		 * another thread might jump in and allocate memory in that
		 * range before we get a chance to establish the new mapping,
		 * and we won't have a chance to restore the old mappings.
		 * So we use VM_FLAGS_OVERWRITE to let Mach VM know that it
		 * has to deallocate the existing mappings and establish the
		 * new ones atomically.
		 */
		alloc_flags |= VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE;
	}

	if (flags & MAP_NOCACHE)
		alloc_flags |= VM_FLAGS_NO_CACHE;

	/*
	 * Lookup/allocate object.
	 */
	if (handle == NULL) {
		pager = NULL;
#ifdef notyet
/* Hmm .. */
#if defined(VM_PROT_READ_IS_EXEC)
		if (prot & VM_PROT_READ)
			prot |= VM_PROT_EXECUTE;
		if (maxprot & VM_PROT_READ)
			maxprot |= VM_PROT_EXECUTE;
#endif
#endif

#if 3777787
		if (prot & (VM_PROT_EXECUTE | VM_PROT_WRITE))
			prot |= VM_PROT_READ;
		if (maxprot & (VM_PROT_EXECUTE | VM_PROT_WRITE))
			maxprot |= VM_PROT_READ;
#endif	/* radar 3777787 */

		result = vm_map_enter_mem_object(user_map,
						 &user_addr, user_size,
						 0, alloc_flags,
						 IPC_PORT_NULL, 0, FALSE,
						 prot, maxprot,
						 (flags & MAP_SHARED) ?
						 VM_INHERIT_SHARE : 
						 VM_INHERIT_DEFAULT);
		if (result != KERN_SUCCESS)
			goto out;
	} else {
		pager = (vm_pager_t)ubc_getpager(vp);
		
		if (pager == NULL) {
			(void)vnode_put(vp);
			error = ENOMEM;
			goto bad;
		}

		/*
		 *  Set credentials:
		 *	FIXME: if we're writing the file we need a way to
		 *      ensure that someone doesn't replace our R/W creds
		 * 	with ones that only work for read.
		 */

		ubc_setthreadcred(vp, p, current_thread());
		docow = FALSE;
		if ((flags & (MAP_ANON|MAP_SHARED)) == 0) {
			docow = TRUE;
		}

#ifdef notyet
/* Hmm .. */
#if defined(VM_PROT_READ_IS_EXEC)
		if (prot & VM_PROT_READ)
			prot |= VM_PROT_EXECUTE;
		if (maxprot & VM_PROT_READ)
			maxprot |= VM_PROT_EXECUTE;
#endif
#endif /* notyet */

#if 3777787
		if (prot & (VM_PROT_EXECUTE | VM_PROT_WRITE))
			prot |= VM_PROT_READ;
		if (maxprot & (VM_PROT_EXECUTE | VM_PROT_WRITE))
			maxprot |= VM_PROT_READ;
#endif	/* radar 3777787 */

		result = vm_map_enter_mem_object(user_map,
						 &user_addr, user_size,
						 0, alloc_flags,
						 (ipc_port_t)pager, file_pos,
						 docow, prot, maxprot, 
						 (flags & MAP_SHARED) ?
						 VM_INHERIT_SHARE : 
						 VM_INHERIT_DEFAULT);

		if (result != KERN_SUCCESS) {
			(void)vnode_put(vp);
			goto out;
		}

		file_prot = prot & (PROT_READ | PROT_WRITE | PROT_EXEC);
		if (docow) {
			/* private mapping: won't write to the file */
			file_prot &= ~PROT_WRITE;
		}
		(void) ubc_map(vp, file_prot);
	}

	if (!mapanon)
		(void)vnode_put(vp);

out:
	switch (result) {
	case KERN_SUCCESS:
		*retval = user_addr + pageoff;
		error = 0;
		break;
	case KERN_INVALID_ADDRESS:
	case KERN_NO_SPACE:
		error =  ENOMEM;
		break;
	case KERN_PROTECTION_FAILURE:
		error =  EACCES;
		break;
	default:
		error =  EINVAL;
		break;
	}
bad:
	if (fpref)
		fp_drop(p, fd, fp, 0);

	KERNEL_DEBUG_CONSTANT((BSDDBG_CODE(DBG_BSD_SC_EXTENDED_INFO, SYS_mmap) | DBG_FUNC_NONE), fd, (uint32_t)(*retval), (uint32_t)user_size, error, 0);
	KERNEL_DEBUG_CONSTANT((BSDDBG_CODE(DBG_BSD_SC_EXTENDED_INFO2, SYS_mmap) | DBG_FUNC_NONE), (uint32_t)(*retval >> 32), (uint32_t)(user_size >> 32),
			      (uint32_t)(file_pos >> 32), (uint32_t)file_pos, 0);

	return(error);
}
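
/*
 * Illustrative user-space sketch (not part of the kernel source): a file mapping that satisfies
 * the constraints enforced by mmap() above -- the file offset must be page aligned (the
 * file_pos & PAGE_MASK_64 check returns EINVAL otherwise), and PROT_WRITE on a MAP_SHARED
 * mapping requires a descriptor opened for writing. The demo_map_file_page() helper name is
 * hypothetical.
 */
#include <sys/types.h>
#include <sys/mman.h>
#include <fcntl.h>
#include <unistd.h>

void *
demo_map_file_page(const char *path, off_t page_aligned_offset, size_t length)
{
	int fd = open(path, O_RDONLY);
	void *addr;

	if (fd < 0)
		return (MAP_FAILED);

	/* Read-only, private mapping: maxprot picks up VM_PROT_READ from FREAD in the code above. */
	addr = mmap(NULL, length, PROT_READ, MAP_PRIVATE, fd, page_aligned_offset);

	/* The mapping retains its own reference to the vnode; the descriptor may be closed. */
	(void)close(fd);
	return (addr);
}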
/**
 * Map pages starting at @a task_addr from @a task into the current process. The mapping
 * will be copy-on-write, and will be checked to ensure a minimum protection value of
 * VM_PROT_READ.
 *
 * @param task The task from which the memory will be mapped.
 * @param task_addr The task-relative address of the memory to be mapped. This is not required to fall on a page boundary.
 * @param length The total size of the mapping to create.
 * @param require_full If false, short mappings will be permitted in the case where a memory object of the requested length
 * does not exist at the target address. It is the caller's responsibility to validate the resulting length of the
 * mapping, e.g., using plcrash_async_mobject_remap_address() and similar. If true, and the entire requested page range is
 * not valid, the mapping request will fail.
 * @param[out] result The in-process address at which the pages were mapped.
 * @param[out] result_length The total size, in bytes, of the mapped pages.
 *
 * @return On success, returns PLCRASH_ESUCCESS. On failure, one of the plcrash_error_t error values will be returned, and no
 * mapping will be performed.
 *
 * @note
 * This code previously used vm_remap() to perform atomic remapping of process memory. However, this appeared
 * to trigger a kernel bug (and resulting panic) on iOS 6.0 through 6.1.2, possibly fixed in 6.1.3. Note that
 * no stable release of PLCrashReporter shipped with the vm_remap() code.
 *
 * Investigation of the failure seems to show an over-release of the target vm_map and backing vm_object, leading to
 * NULL dereference, invalid memory references, and in some cases, deadlocks that result in watchdog timeouts.
 *
 * In one example case, the crash occurs in update_first_free_ll() as a NULL dereference of the vm_map_entry_t parameter.
 * Analysis of the limited reports shows that this is called via vm_map_store_update_first_free(). No backtrace is
 * available from the kernel panics, but analyzing the register state demonstrates:
 * - A reference to vm_map_store_update_first_free() remains in the link register.
 * - Of the following callers, one can be eliminated by register state:
 *     - vm_map_enter - not possible, r3 should be equal to r0
 *     - vm_map_clip_start - possible
 *     - vm_map_clip_unnest - possible
 *     - vm_map_clip_end - possible
 *
 * In the other panic seen in vm_object_reap_pages(), a value of 0x8008 is loaded and dereferenced from the next pointer
 * of an element within the vm_object's resident page queue (object->memq).
 *
 * Unfortunately, our ability to investigate has been extremely constrained by the following issues:
 * - The panic is not easily or reliably reproducible.
 * - Apple does not support iOS kernel debugging.
 * - There is no support for jailbreak kernel debugging against iOS 6.x devices at the time of writing.
 *
 * The work-around used here is to split the vm_remap() into distinct calls to mach_make_memory_entry_64() and
 * vm_map(); this follows a largely distinct code path from vm_remap(). In testing by a large-scale user of PLCrashReporter,
 * they were no longer able to reproduce the issue with this fix in place. Additionally, they've not been able to reproduce
 * the issue on 6.1.3 devices, or had any reports of the issue occurring on 6.1.3 devices.
 *
 * The mach_make_memory_entry_64() API may not actually return an entry for the full requested length; this requires
 * that we loop through the full range, requesting an entry for the remaining unallocated pages, and then mapping
 * the pages in question. Since this requires multiple calls to vm_map(), we pre-allocate a contiguous range of pages
 * for the target mappings into which we'll insert (via overwrite) our own mappings.
 *
 * @note
 * As a work-around for bugs in Apple's Mach-O/dyld implementation, we provide the @a require_full flag; if false,
 * a successful mapping that is smaller than the requested range may be made, and will not return an error. This is necessary
 * to allow our callers to work around bugs in update_dyld_shared_cache(1), which writes out a larger Mach-O VM segment
 * size value than is actually available and mappable. See the plcrash_async_macho_map_segment() API documentation for
 * more details. This bug has been reported to Apple as rdar://13707406.
 */
static plcrash_error_t plcrash_async_mobject_remap_pages_workaround (mach_port_t task,
                                                                     pl_vm_address_t task_addr,
                                                                     pl_vm_size_t length,
                                                                     bool require_full,
                                                                     pl_vm_address_t *result,
                                                                     pl_vm_size_t *result_length)
{
    kern_return_t kt;

    /* Compute the total required page size. */
    pl_vm_address_t base_addr = mach_vm_trunc_page(task_addr);
    pl_vm_size_t total_size = mach_vm_round_page(length + (task_addr - base_addr));
    
    /*
     * If short mappings are permitted, determine the actual mappable size of the target range. Due
     * to rdar://13707406 (update_dyld_shared_cache appears to write invalid LINKEDIT vmsize), an
     * LC_SEGMENT-reported VM size may be far larger than the actual mapped pages. This would result
     * in us making large (eg, 36MB) allocations in cases where the mappable range is actually much
     * smaller, which can trigger out-of-memory conditions on smaller devices.
     */
    if (!require_full) {
        pl_vm_size_t verified_size = 0;
        
        while (verified_size < total_size) {            
            memory_object_size_t entry_length = total_size - verified_size;
            mach_port_t mem_handle;
            
            /* Fetch an entry reference */
            kt = mach_make_memory_entry_64(task, &entry_length, base_addr + verified_size, VM_PROT_READ, &mem_handle, MACH_PORT_NULL);
            if (kt != KERN_SUCCESS) {
                /* Once we hit an unmappable page, break */
                break;
            }
            
            /* Drop the reference */
            kt = mach_port_mod_refs(mach_task_self(), mem_handle, MACH_PORT_RIGHT_SEND, -1);
            if (kt != KERN_SUCCESS) {
                PLCF_DEBUG("mach_port_mod_refs(-1) failed: %d", kt);
            }

            /* Note the size */
            verified_size += entry_length;
        }

        /* No valid page found at the task_addr */
        if (verified_size == 0) {
            PLCF_DEBUG("No mappable pages found at 0x%" PRIx64, (uint64_t) task_addr);
            return PLCRASH_ENOMEM;
        }

        /* Reduce the total size to the verified size */
        if (verified_size < total_size)
            total_size = verified_size;
    }

    /*
     * Set aside a memory range large enough for the total requested number of pages. Ideally the kernel
     * will lazy-allocate the backing physical pages so that we don't waste actual memory on this
     * pre-emptive page range reservation.
     */
    pl_vm_address_t mapping_addr = 0x0;
    pl_vm_size_t mapped_size = 0;
#ifdef PL_HAVE_MACH_VM
    kt = mach_vm_allocate(mach_task_self(), &mapping_addr, total_size, VM_FLAGS_ANYWHERE);
#else
    kt = vm_allocate(mach_task_self(), &mapping_addr, total_size, VM_FLAGS_ANYWHERE);
#endif

    if (kt != KERN_SUCCESS) {
        PLCF_DEBUG("Failed to allocate a target page range for the page remapping: %d", kt);
        return PLCRASH_EINTERNAL;
    }

    /* Map the source pages into the allocated region, overwriting the existing page mappings */
    while (mapped_size < total_size) {
        /* Create a reference to the target pages. The returned entry may be smaller than the total length. */
        memory_object_size_t entry_length = total_size - mapped_size;
        mach_port_t mem_handle;
        kt = mach_make_memory_entry_64(task, &entry_length, base_addr + mapped_size, VM_PROT_READ, &mem_handle, MACH_PORT_NULL);
        if (kt != KERN_SUCCESS) {            
            /* No pages were found at the target. When validating the total length above, we already verified the
             * availability of the requested pages; if they have since disappeared, we can treat this as an error,
             * even if !require_full was specified. */
            PLCF_DEBUG("mach_make_memory_entry_64() failed: %d", kt);
            
            /* Clean up the reserved pages */
            kt = vm_deallocate(mach_task_self(), mapping_addr, total_size);
            if (kt != KERN_SUCCESS) {
                PLCF_DEBUG("vm_deallocate() failed: %d", kt);
            }
            
            /* Return error */
            return PLCRASH_ENOMEM;
        }
        
        /* Map the pages into our local task, overwriting the allocation used to reserve the target space above. */
        pl_vm_address_t target_address = mapping_addr + mapped_size;
#ifdef PL_HAVE_MACH_VM
        kt = mach_vm_map(mach_task_self(), &target_address, entry_length, 0x0, VM_FLAGS_FIXED|VM_FLAGS_OVERWRITE, mem_handle, 0x0, TRUE, VM_PROT_READ, VM_PROT_READ, VM_INHERIT_COPY);
#else
        kt = vm_map(mach_task_self(), &target_address, entry_length, 0x0, VM_FLAGS_FIXED|VM_FLAGS_OVERWRITE, mem_handle, 0x0, TRUE, VM_PROT_READ, VM_PROT_READ, VM_INHERIT_COPY);
#endif /* !PL_HAVE_MACH_VM */
        
        if (kt != KERN_SUCCESS) {
            PLCF_DEBUG("vm_map() failure: %d", kt);

            /* Clean up the reserved pages */
            kt = vm_deallocate(mach_task_self(), mapping_addr, total_size);
            if (kt != KERN_SUCCESS) {
                PLCF_DEBUG("vm_deallocate() failed: %d", kt);
            }

            /* Drop the memory handle */
            kt = mach_port_mod_refs(mach_task_self(), mem_handle, MACH_PORT_RIGHT_SEND, -1);
            if (kt != KERN_SUCCESS) {
                PLCF_DEBUG("mach_port_mod_refs(-1) failed: %d", kt);
            }
            
            return PLCRASH_ENOMEM;
        }

        /* Drop the memory handle */
        kt = mach_port_mod_refs(mach_task_self(), mem_handle, MACH_PORT_RIGHT_SEND, -1);
        if (kt != KERN_SUCCESS) {
            PLCF_DEBUG("mach_port_mod_refs(-1) failed: %d", kt);
        }
        
        /* Adjust the total mapping size */
        mapped_size += entry_length;
    }
    
    *result = mapping_addr;
    *result_length = mapped_size;

    return PLCRASH_ESUCCESS;
}
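
/*
 * Illustrative tear-down sketch (an assumption, not part of the original source): the pages
 * returned via the result/result_length parameters above are a plain allocation in the current
 * task, so discarding them is a single vm_deallocate() of that range, mirroring the error paths
 * in the mapping loop above. The demo_discard_remapped_pages() helper name is hypothetical.
 */
static void demo_discard_remapped_pages (pl_vm_address_t addr, pl_vm_size_t length) {
    kern_return_t kt;

#ifdef PL_HAVE_MACH_VM
    kt = mach_vm_deallocate(mach_task_self(), addr, length);
#else
    kt = vm_deallocate(mach_task_self(), addr, length);
#endif

    if (kt != KERN_SUCCESS) {
        PLCF_DEBUG("vm_deallocate() failed: %d", kt);
    }
}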