Example #1
RefPtr<SharedMemory> SharedMemory::map(const Handle& handle, Protection protection)
{
    if (handle.isNull())
        return nullptr;
    
    ASSERT(round_page(handle.m_size) == handle.m_size);

    vm_prot_t vmProtection = machProtection(protection);
    mach_vm_address_t mappedAddress = 0;
    kern_return_t kr = mach_vm_map(mach_task_self(), &mappedAddress, round_page(handle.m_size), 0, VM_FLAGS_ANYWHERE, handle.m_port, 0, false, vmProtection, vmProtection, VM_INHERIT_NONE);
#if RELEASE_LOG_DISABLED
    if (kr != KERN_SUCCESS)
        return nullptr;
#else
    if (kr != KERN_SUCCESS) {
        RELEASE_LOG_ERROR(VirtualMemory, "%p - SharedMemory::map: Failed to map shared memory. %{public}s (%x)", nullptr, mach_error_string(kr), kr);
        return nullptr;
    }
#endif

    auto sharedMemory(adoptRef(*new SharedMemory));
    sharedMemory->m_size = handle.m_size;
    sharedMemory->m_data = toPointer(mappedAddress);
    sharedMemory->m_port = MACH_PORT_NULL;
    sharedMemory->m_protection = protection;

    return WTFMove(sharedMemory);
}
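The map() path above consumes a Handle wrapping a Mach memory-entry send right produced by the sharing process. As a rough illustration of that producer side, here is a minimal sketch using plain Mach calls; the helper name makeSharedRegion is hypothetical and this is not WebKit's actual SharedMemory::allocate/createHandle code.

// Sketch: allocate an anonymous region and wrap it in a memory-entry port that
// a peer could later pass to mach_vm_map(). Hypothetical helper, not WebKit code.
#include <mach/mach.h>
#include <mach/mach_vm.h>

static bool makeSharedRegion(mach_vm_size_t size, mach_vm_address_t* outAddress, mach_port_t* outPort)
{
    mach_vm_address_t address = 0;
    mach_vm_size_t roundedSize = mach_vm_round_page(size);

    // Back the region with freshly allocated, zero-filled pages.
    kern_return_t kr = mach_vm_allocate(mach_task_self(), &address, roundedSize, VM_FLAGS_ANYWHERE);
    if (kr != KERN_SUCCESS)
        return false;

    // Create a named memory entry covering the region; the returned send right
    // is what the consumer hands to mach_vm_map() as the object port.
    memory_object_size_t entrySize = roundedSize;
    mach_port_t port = MACH_PORT_NULL;
    kr = mach_make_memory_entry_64(mach_task_self(), &entrySize, address,
                                   VM_PROT_READ | VM_PROT_WRITE, &port, MACH_PORT_NULL);
    if (kr != KERN_SUCCESS || entrySize < roundedSize) {
        mach_vm_deallocate(mach_task_self(), address, roundedSize);
        return false;
    }

    *outAddress = address;
    *outPort = port;
    return true;
}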
Example #2
kern_return_t
kext_alloc(vm_offset_t *_addr, vm_size_t size, boolean_t fixed)
{
    kern_return_t rval = 0;
#if CONFIG_KEXT_BASEMENT
    mach_vm_offset_t addr = (fixed) ? *_addr : kext_post_boot_base;
#else
    mach_vm_offset_t addr = (fixed) ? *_addr : kext_alloc_base;
#endif
    int flags = (fixed) ? VM_FLAGS_FIXED : VM_FLAGS_ANYWHERE;
 
#if CONFIG_KEXT_BASEMENT
    /* Allocate the kext virtual memory.
     * 10608884 - use mach_vm_map since we want VM_FLAGS_ANYWHERE allocations
     * placed past kext_post_boot_base (when possible).  mach_vm_allocate always
     * starts searching at 0 in the map no matter what you pass in addr.  We want
     * non-fixed (post-boot) kext allocations to start looking for free space
     * just past where prelinked kexts have loaded.
     */
    rval = mach_vm_map(g_kext_map, 
                       &addr, 
                       size, 
                       0,
                       flags,
                       MACH_PORT_NULL,
                       0,
                       TRUE,
                       VM_PROT_DEFAULT,
                       VM_PROT_ALL,
                       VM_INHERIT_DEFAULT);
    if (rval != KERN_SUCCESS) {
        printf("mach_vm_map failed - %d\n", rval);
        goto finish;
    }
#else
    rval = mach_vm_allocate(g_kext_map, &addr, size, flags);
    if (rval != KERN_SUCCESS) {
        printf("vm_allocate failed - %d\n", rval);
        goto finish;
    }
#endif

    /* Check that the memory is reachable by kernel text */
    if ((addr + size) > kext_alloc_max) {
        kext_free((vm_offset_t)addr, size);
        rval = KERN_INVALID_ADDRESS;
        goto finish;
    }

    *_addr = (vm_offset_t)addr;
    rval = KERN_SUCCESS;

finish:
    return rval;
}
Example #3
PassRefPtr<SharedMemory> SharedMemory::create(const Handle& handle, Protection protection)
{
    if (handle.isNull())
        return 0;
    
    // Map the memory.
    vm_prot_t vmProtection = machProtection(protection);
    mach_vm_address_t mappedAddress = 0;
    kern_return_t kr = mach_vm_map(mach_task_self(), &mappedAddress, handle.m_size, 0, VM_FLAGS_ANYWHERE, handle.m_port, 0, false, vmProtection, vmProtection, VM_INHERIT_NONE);
    if (kr != KERN_SUCCESS)
        return 0;

    RefPtr<SharedMemory> sharedMemory(adoptRef(new SharedMemory));
    sharedMemory->m_size = handle.m_size;
    sharedMemory->m_data = toPointer(mappedAddress);
    
    return sharedMemory.release();
}
Example #4
RefPtr<SharedMemory> SharedMemory::map(const Handle& handle, Protection protection)
{
    if (handle.isNull())
        return nullptr;
    
    ASSERT(round_page(handle.m_size) == handle.m_size);

    vm_prot_t vmProtection = machProtection(protection);
    mach_vm_address_t mappedAddress = 0;
    kern_return_t kr = mach_vm_map(mach_task_self(), &mappedAddress, round_page(handle.m_size), 0, VM_FLAGS_ANYWHERE, handle.m_port, 0, false, vmProtection, vmProtection, VM_INHERIT_NONE);
    if (kr != KERN_SUCCESS)
        return nullptr;

    auto sharedMemory(adoptRef(*new SharedMemory));
    sharedMemory->m_size = handle.m_size;
    sharedMemory->m_data = toPointer(mappedAddress);
    sharedMemory->m_port = MACH_PORT_NULL;
    sharedMemory->m_protection = protection;

    return WTFMove(sharedMemory);
}
Example #5
static bool
CreateThePort(mach_vm_address_t& child_address)
{
  mach_vm_address_t address = 0;  // hint only; VM_FLAGS_ANYWHERE lets the kernel choose the address
  mach_port_t port;
  size_t size = 8000;

  kern_return_t kr = mach_vm_allocate(mach_task_self(), &address, round_page(size), VM_FLAGS_ANYWHERE);
  if (kr != KERN_SUCCESS) {
    printf("Failed to allocate mach_vm_allocate shared memory (%zu bytes). %s (%x)", size, mach_error_string(kr), kr);
    return false;
  }

  memory_object_size_t memoryObjectSize = round_page(size);

  kr = mach_make_memory_entry_64(mach_task_self(), &memoryObjectSize, address, VM_PROT_DEFAULT, &port, MACH_PORT_NULL);
  if (kr != KERN_SUCCESS) {
    printf("Failed to make memory entry (%zu bytes). %s (%x)\n", size, mach_error_string(kr), kr);
    return false;
  }

  vm_prot_t vmProtection = VM_PROT_READ | VM_PROT_WRITE;

  // Choose an address that will be valid in the child process and point to our buffer.
  // child_address must not be dereferenced in the parent process.
  child_address = address + 0x10000;

  kr = mach_vm_map(child_task, &child_address, round_page(size), 0, 0,
                  port, 0, false, vmProtection, vmProtection, VM_INHERIT_NONE);
  if (kr != KERN_SUCCESS) {
    printf("Failed to mach_vm_map (%zu bytes). %s (%x)\n", size, mach_error_string(kr), kr);
    return false;
  }

  int* buf = reinterpret_cast<int*>(static_cast<uintptr_t>(address));

  buf[0] = 42;

  return true;
}
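CreateThePort() relies on a child_task send right for the child's task port, which is outside the snippet. One minimal sketch of obtaining it with task_for_pid() (which needs root or the relevant entitlements on current macOS; child_pid and the helper name are placeholders):

// Sketch: acquire a task port for an already-spawned child process. The global
// child_task mirrors the one used by CreateThePort() above; requires privileges.
#include <mach/mach.h>
#include <mach/mach_traps.h>
#include <stdio.h>
#include <unistd.h>

static mach_port_t child_task = MACH_PORT_NULL;

static bool AcquireChildTaskPort(pid_t child_pid)
{
  kern_return_t kr = task_for_pid(mach_task_self(), child_pid, &child_task);
  if (kr != KERN_SUCCESS) {
    printf("task_for_pid(%d) failed. %s (%x)\n", (int)child_pid, mach_error_string(kr), kr);
    return false;
  }
  return true;
}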
Example #6
int
_kernelrpc_mach_vm_map_trap(struct _kernelrpc_mach_vm_map_trap_args *args)
{
	mach_vm_offset_t addr;
	task_t task = port_name_to_task(args->target);
	int rv = MACH_SEND_INVALID_DEST;

	/* The trap may only act on the caller's own task. */
	if (task != current_task())
		goto done;

	if (copyin(args->addr, (char *)&addr, sizeof (addr)))
		goto done;

	/* Anonymous memory: no backing object port; the caller supplies the initial
	 * protection, and the maximum protection is fixed at VM_PROT_ALL. */
	rv = mach_vm_map(task->map, &addr, args->size, args->mask, args->flags,
			IPC_PORT_NULL, 0, FALSE, args->cur_protection, VM_PROT_ALL,
			VM_INHERIT_DEFAULT);
	if (rv == KERN_SUCCESS)
		rv = copyout(&addr, args->addr, sizeof (addr));

done:
	if (task)
		task_deallocate(task);
	return (rv);
}
Example #7
DISPATCH_NOINLINE
static void
_dispatch_alloc_try_create_heap(dispatch_heap_t *heap_ptr)
{
#if HAVE_MACH
	kern_return_t kr;
	mach_vm_size_t vm_size = MAGAZINES_PER_HEAP * BYTES_PER_MAGAZINE;
	mach_vm_offset_t vm_mask = ~MAGAZINE_MASK;
	mach_vm_address_t vm_addr = vm_page_size;
	while (slowpath(kr = mach_vm_map(mach_task_self(), &vm_addr, vm_size,
			vm_mask, VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_MEMORY_LIBDISPATCH),
			MEMORY_OBJECT_NULL, 0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL,
			VM_INHERIT_DEFAULT))) {
		if (kr != KERN_NO_SPACE) {
			(void)dispatch_assume_zero(kr);
			DISPATCH_CLIENT_CRASH("Could not allocate heap");
		}
		_dispatch_temporary_resource_shortage();
		vm_addr = vm_page_size;
	}
	uintptr_t aligned_region = (uintptr_t)vm_addr;
#else // HAVE_MACH
	const size_t region_sz = (1 + MAGAZINES_PER_HEAP) * BYTES_PER_MAGAZINE;
	void *region_p;
	while (!dispatch_assume((region_p = mmap(NULL, region_sz,
			PROT_READ|PROT_WRITE, MAP_ANON | MAP_PRIVATE,
			VM_MAKE_TAG(VM_MEMORY_LIBDISPATCH), 0)) != MAP_FAILED)) {
		_dispatch_temporary_resource_shortage();
	}
	uintptr_t region = (uintptr_t)region_p;
	uintptr_t region_end = region + region_sz;
	uintptr_t aligned_region, aligned_region_end;
	uintptr_t bottom_slop_len, top_slop_len;
	// Realign if needed; find the slop at top/bottom to unmap
	if ((region & ~(MAGAZINE_MASK)) == 0) {
		bottom_slop_len = 0;
		aligned_region = region;
		aligned_region_end = region_end - BYTES_PER_MAGAZINE;
		top_slop_len = BYTES_PER_MAGAZINE;
	} else {
		aligned_region = (region & MAGAZINE_MASK) + BYTES_PER_MAGAZINE;
		aligned_region_end = aligned_region +
				(MAGAZINES_PER_HEAP * BYTES_PER_MAGAZINE);
		bottom_slop_len = aligned_region - region;
		top_slop_len = BYTES_PER_MAGAZINE - bottom_slop_len;
	}
#if DISPATCH_DEBUG
	// Double-check our math.
	dispatch_assert(aligned_region % PAGE_SIZE == 0);
	dispatch_assert(aligned_region_end % PAGE_SIZE == 0);
	dispatch_assert(aligned_region_end > aligned_region);
	dispatch_assert(top_slop_len % PAGE_SIZE == 0);
	dispatch_assert(bottom_slop_len % PAGE_SIZE == 0);
	dispatch_assert(aligned_region_end + top_slop_len == region_end);
	dispatch_assert(region + bottom_slop_len == aligned_region);
	dispatch_assert(region_sz == bottom_slop_len + top_slop_len +
			MAGAZINES_PER_HEAP * BYTES_PER_MAGAZINE);
	if (bottom_slop_len) {
		(void)dispatch_assume_zero(mprotect((void *)region, bottom_slop_len,
				PROT_NONE));
	}
	if (top_slop_len) {
		(void)dispatch_assume_zero(mprotect((void *)aligned_region_end,
				top_slop_len, PROT_NONE));
	}
#else
	if (bottom_slop_len) {
		(void)dispatch_assume_zero(munmap((void *)region, bottom_slop_len));
	}
	if (top_slop_len) {
		(void)dispatch_assume_zero(munmap((void *)aligned_region_end,
				top_slop_len));
	}
#endif // DISPATCH_DEBUG
#endif // HAVE_MACH

	if (!dispatch_atomic_cmpxchg(heap_ptr, NULL, (void *)aligned_region,
			relaxed)) {
		// If we lost the race to link in the new region, unmap the whole thing.
#if DISPATCH_DEBUG
		(void)dispatch_assume_zero(mprotect((void *)aligned_region,
				MAGAZINES_PER_HEAP * BYTES_PER_MAGAZINE, PROT_NONE));
#else
		(void)dispatch_assume_zero(munmap((void *)aligned_region,
				MAGAZINES_PER_HEAP * BYTES_PER_MAGAZINE));
#endif
	}
}
Example #8
static
load_return_t
load_segment(
	struct load_command		*lcp,
	uint32_t			filetype,
	void *				control,
	off_t				pager_offset,
	off_t				macho_size,
	struct vnode			*vp,
	vm_map_t			map,
	int64_t				slide,
	load_result_t		*result
)
{
	struct segment_command_64 segment_command, *scp;
	kern_return_t		ret;
	vm_map_offset_t		map_addr, map_offset;
	vm_map_size_t		map_size, seg_size, delta_size;
	vm_prot_t 		initprot;
	vm_prot_t		maxprot;
	size_t			segment_command_size, total_section_size,
				single_section_size;
	boolean_t		prohibit_pagezero_mapping = FALSE;
	
	if (LC_SEGMENT_64 == lcp->cmd) {
		segment_command_size = sizeof(struct segment_command_64);
		single_section_size  = sizeof(struct section_64);
	} else {
		segment_command_size = sizeof(struct segment_command);
		single_section_size  = sizeof(struct section);
	}
	if (lcp->cmdsize < segment_command_size)
		return (LOAD_BADMACHO);
	total_section_size = lcp->cmdsize - segment_command_size;

	if (LC_SEGMENT_64 == lcp->cmd)
		scp = (struct segment_command_64 *)lcp;
	else {
		scp = &segment_command;
		widen_segment_command((struct segment_command *)lcp, scp);
	}

	/*
	 * Make sure what we get from the file is really ours (as specified
	 * by macho_size).
	 */
	if (scp->fileoff + scp->filesize < scp->fileoff ||
	    scp->fileoff + scp->filesize > (uint64_t)macho_size)
		return (LOAD_BADMACHO);
	/*
	 * Ensure that the number of sections specified would fit
	 * within the load command size.
	 */
	if (total_section_size / single_section_size < scp->nsects)
		return (LOAD_BADMACHO);
	/*
	 * Make sure the segment is page-aligned in the file.
	 */
	if ((scp->fileoff & PAGE_MASK_64) != 0)
		return (LOAD_BADMACHO);

	/*
	 *	Round sizes to page size.
	 */
	seg_size = round_page_64(scp->vmsize);
	map_size = round_page_64(scp->filesize);
	map_addr = trunc_page_64(scp->vmaddr); /* JVXXX note that in XNU TOT this is round instead of trunc for 64 bits */
	if (seg_size == 0)
		return (KERN_SUCCESS);
	if (map_addr == 0 &&
	    map_size == 0 &&
	    seg_size != 0 &&
	    (scp->initprot & VM_PROT_ALL) == VM_PROT_NONE &&
	    (scp->maxprot & VM_PROT_ALL) == VM_PROT_NONE) {
		/*
		 * For PIE, extend page zero rather than moving it.  Extending
		 * page zero keeps early allocations from falling predictably
		 * between the end of page zero and the beginning of the first
		 * slid segment.
		 */
		seg_size += slide;
		slide = 0;
#if CONFIG_EMBEDDED
		prohibit_pagezero_mapping = TRUE;
#endif
		/* XXX (4596982) this interferes with Rosetta, so limit to 64-bit tasks */
		if (scp->cmd == LC_SEGMENT_64) {
		        prohibit_pagezero_mapping = TRUE;
		}
		
		if (prohibit_pagezero_mapping) {
			/*
			 * This is a "page zero" segment:  it starts at address 0,
			 * is not mapped from the binary file and is not accessible.
			 * User-space should never be able to access that memory, so
			 * make it completely off limits by raising the VM map's
			 * minimum offset.
			 */
			ret = vm_map_raise_min_offset(map, seg_size);
			if (ret != KERN_SUCCESS) {
				return (LOAD_FAILURE);
			}
			return (LOAD_SUCCESS);
		}
	}

	/* If a non-zero slide was specified by the caller, apply now */
	map_addr += slide;

	if (map_addr < result->min_vm_addr)
		result->min_vm_addr = map_addr;
	if (map_addr+seg_size > result->max_vm_addr)
		result->max_vm_addr = map_addr+seg_size;

	if (map == VM_MAP_NULL)
		return (LOAD_SUCCESS);

	map_offset = pager_offset + scp->fileoff;	/* limited to 32 bits */

	if (map_size > 0) {
		initprot = (scp->initprot) & VM_PROT_ALL;
		maxprot = (scp->maxprot) & VM_PROT_ALL;
		/*
		 *	Map a copy of the file into the address space.
		 */
		ret = vm_map_enter_mem_object_control(map,
				&map_addr, map_size, (mach_vm_offset_t)0,
			        VM_FLAGS_FIXED,	control, map_offset, TRUE,
				initprot, maxprot,
				VM_INHERIT_DEFAULT);
		if (ret != KERN_SUCCESS)
			return (LOAD_NOSPACE);
	
		/*
		 *	If the file didn't end on a page boundary,
		 *	we need to zero the leftover.
		 */
		delta_size = map_size - scp->filesize;
#if FIXME
		if (delta_size > 0) {
			mach_vm_offset_t	tmp;
	
			ret = mach_vm_allocate(kernel_map, &tmp, delta_size, VM_FLAGS_ANYWHERE);
			if (ret != KERN_SUCCESS)
				return(LOAD_RESOURCE);
	
			if (copyout(tmp, map_addr + scp->filesize,
								delta_size)) {
				(void) mach_vm_deallocate(
						kernel_map, tmp, delta_size);
				return (LOAD_FAILURE);
			}
	
			(void) mach_vm_deallocate(kernel_map, tmp, delta_size);
		}
#endif /* FIXME */
	}

	/*
	 *	If the virtual size of the segment is greater
	 *	than the size from the file, we need to allocate
	 *	zero fill memory for the rest.
	 */
	delta_size = seg_size - map_size;
	if (delta_size > 0) {
		mach_vm_offset_t tmp = map_addr + map_size;

		ret = mach_vm_map(map, &tmp, delta_size, 0, VM_FLAGS_FIXED,
				  NULL, 0, FALSE,
				  scp->initprot, scp->maxprot,
				  VM_INHERIT_DEFAULT);
		if (ret != KERN_SUCCESS)
			return(LOAD_NOSPACE);
	}

	if ( (scp->fileoff == 0) && (scp->filesize != 0) )
		result->mach_header = map_addr;

	if (scp->flags & SG_PROTECTED_VERSION_1) {
		ret = unprotect_segment(scp->fileoff,
					scp->filesize,
					vp,
					pager_offset,
					map,
					map_addr,
					map_size);
	} else {
		ret = LOAD_SUCCESS;
	}
	if (LOAD_SUCCESS == ret && filetype == MH_DYLINKER &&
	    result->all_image_info_addr == MACH_VM_MIN_ADDRESS)
		note_all_image_info_section(scp,
		    LC_SEGMENT_64 == lcp->cmd, single_section_size,
		    (const char *)lcp + segment_command_size, slide, result);

	if ((result->entry_point >= map_addr) && (result->entry_point < (map_addr + map_size)))
		result->validentry = 1;

	return ret;
}
Example #9
/**
 * Map pages starting at @a task_addr from @a task into the current process. The mapping
 * will be copy-on-write, and will be checked to ensure a minimum protection value of
 * VM_PROT_READ.
 *
 * @param task The task from which the memory will be mapped.
 * @param task_addr The task-relative address of the memory to be mapped. This is not required to fall on a page boundary.
 * @param length The total size of the mapping to create.
 * @param require_full If false, short mappings will be permitted in the case where a memory object of the requested length
 * does not exist at the target address. It is the caller's responsibility to validate the resulting length of the
 * mapping, e.g., using plcrash_async_mobject_remap_address() and similar. If true, and the entire requested page range is
 * not valid, the mapping request will fail.
 * @param[out] result The in-process address at which the pages were mapped.
 * @param[out] result_length The total size, in bytes, of the mapped pages.
 *
 * @return On success, returns PLCRASH_ESUCCESS. On failure, one of the plcrash_error_t error values will be returned, and no
 * mapping will be performed.
 *
 * @note
 * This code previously used vm_remap() to perform atomic remapping of process memory. However, this appeared
 * to trigger a kernel bug (and resulting panic) on iOS 6.0 through 6.1.2, possibly fixed in 6.1.3. Note that
 * no stable release of PLCrashReporter shipped with the vm_remap() code.
 *
 * Investigation of the failure seems to show an over-release of the target vm_map and backing vm_object, leading to
 * NULL dereference, invalid memory references, and in some cases, deadlocks that result in watchdog timeouts.
 *
 * In one example case, the crash occurs in update_first_free_ll() as a NULL dereference of the vm_map_entry_t parameter.
 * Analysis of the limited reports shows that this is called via vm_map_store_update_first_free(). No backtrace is
 * available from the kernel panics, but analyzing the register state demonstrates:
 * - A reference to vm_map_store_update_first_free() remains in the link register.
 * - Of the following callers, one can be eliminated by register state:
 *     - vm_map_enter - not possible, r3 should be equal to r0
 *     - vm_map_clip_start - possible
 *     - vm_map_clip_unnest - possible
 *     - vm_map_clip_end - possible
 *
 * In the other panic seen in vm_object_reap_pages(), a value of 0x8008 is loaded and dereferenced from the next pointer
 * of an element within the vm_object's resident page queue (object->memq).
 *
 * Unfortunately, our ability to investigate has been extremely constrained by the following issues:
 * - The panic is not easily or reliably reproducible
 * - Apple does not support iOS kernel debugging
 * - There is no support for jailbreak kernel debugging against iOS 6.x devices at the time of writing.
 *
 * The work-around used here is to split the vm_remap() into distinct calls to mach_make_memory_entry_64() and
 * vm_map(); this follows a largely distinct code path from vm_remap(). In testing by a large-scale user of PLCrashReporter,
 * they were no longer able to reproduce the issue with this fix in place. Additionally, they've not been able to reproduce
 * the issue on 6.1.3 devices, nor had any reports of the issue occurring on 6.1.3 devices.
 *
 * The mach_make_memory_entry_64() API may not actually return an entry for the full requested length; this requires
 * that we loop through the full range, requesting an entry for the remaining unallocated pages, and then mapping
 * the pages in question. Since this requires multiple calls to vm_map(), we pre-allocate a contiguous range of pages
 * for the target mappings into which we'll insert (via overwrite) our own mappings.
 *
 * @note
 * As a work-around for bugs in Apple's Mach-O/dyld implementation, we provide the @a require_full flag; if false,
 * a successful mapping that is smaller than the requested range may be made, and will not return an error. This is necessary
 * to allow our callers to work around bugs in update_dyld_shared_cache(1), which writes out a larger Mach-O VM segment
 * size value than is actually available and mappable. See the plcrash_async_macho_map_segment() API documentation for
 * more details. This bug has been reported to Apple as rdar://13707406.
 */
static plcrash_error_t plcrash_async_mobject_remap_pages_workaround (mach_port_t task,
                                                                     pl_vm_address_t task_addr,
                                                                     pl_vm_size_t length,
                                                                     bool require_full,
                                                                     pl_vm_address_t *result,
                                                                     pl_vm_size_t *result_length)
{
    kern_return_t kt;

    /* Compute the total required page size. */
    pl_vm_address_t base_addr = mach_vm_trunc_page(task_addr);
    pl_vm_size_t total_size = mach_vm_round_page(length + (task_addr - base_addr));
    
    /*
     * If short mappings are permitted, determine the actual mappable size of the target range. Due
     * to rdar://13707406 (update_dyld_shared_cache appears to write invalid LINKEDIT vmsize), an
     * LC_SEGMENT-reported VM size may be far larger than the actual mapped pages. This would result
     * in us making large (eg, 36MB) allocations in cases where the mappable range is actually much
     * smaller, which can trigger out-of-memory conditions on smaller devices.
     */
    if (!require_full) {
        pl_vm_size_t verified_size = 0;
        
        while (verified_size < total_size) {            
            memory_object_size_t entry_length = total_size - verified_size;
            mach_port_t mem_handle;
            
            /* Fetch an entry reference */
            kt = mach_make_memory_entry_64(task, &entry_length, base_addr + verified_size, VM_PROT_READ, &mem_handle, MACH_PORT_NULL);
            if (kt != KERN_SUCCESS) {
                /* Once we hit an unmappable page, break */
                break;
            }
            
            /* Drop the reference */
            kt = mach_port_mod_refs(mach_task_self(), mem_handle, MACH_PORT_RIGHT_SEND, -1);
            if (kt != KERN_SUCCESS) {
                PLCF_DEBUG("mach_port_mod_refs(-1) failed: %d", kt);
            }

            /* Note the size */
            verified_size += entry_length;
        }

        /* No valid page found at the task_addr */
        if (verified_size == 0) {
            PLCF_DEBUG("No mappable pages found at 0x%" PRIx64, (uint64_t) task_addr);
            return PLCRASH_ENOMEM;
        }

        /* Reduce the total size to the verified size */
        if (verified_size < total_size)
            total_size = verified_size;
    }

    /*
     * Set aside a memory range large enough for the total requested number of pages. Ideally the kernel
     * will lazy-allocate the backing physical pages so that we don't waste actual memory on this
     * pre-emptive page range reservation.
     */
    pl_vm_address_t mapping_addr = 0x0;
    pl_vm_size_t mapped_size = 0;
#ifdef PL_HAVE_MACH_VM
    kt = mach_vm_allocate(mach_task_self(), &mapping_addr, total_size, VM_FLAGS_ANYWHERE);
#else
    kt = vm_allocate(mach_task_self(), &mapping_addr, total_size, VM_FLAGS_ANYWHERE);
#endif

    if (kt != KERN_SUCCESS) {
        PLCF_DEBUG("Failed to allocate a target page range for the page remapping: %d", kt);
        return PLCRASH_EINTERNAL;
    }

    /* Map the source pages into the allocated region, overwriting the existing page mappings */
    while (mapped_size < total_size) {
        /* Create a reference to the target pages. The returned entry may be smaller than the total length. */
        memory_object_size_t entry_length = total_size - mapped_size;
        mach_port_t mem_handle;
        kt = mach_make_memory_entry_64(task, &entry_length, base_addr + mapped_size, VM_PROT_READ, &mem_handle, MACH_PORT_NULL);
        if (kt != KERN_SUCCESS) {            
            /* No pages are found at the target. When validating the total length above, we already verified the
             * availability of the requested pages; if they've now disappeared, we can treat it as an error,
             * even if !require_full was specified */
            PLCF_DEBUG("mach_make_memory_entry_64() failed: %d", kt);
            
            /* Clean up the reserved pages */
            kt = vm_deallocate(mach_task_self(), mapping_addr, total_size);
            if (kt != KERN_SUCCESS) {
                PLCF_DEBUG("vm_deallocate() failed: %d", kt);
            }
            
            /* Return error */
            return PLCRASH_ENOMEM;
        }
        
        /* Map the pages into our local task, overwriting the allocation used to reserve the target space above. */
        pl_vm_address_t target_address = mapping_addr + mapped_size;
#ifdef PL_HAVE_MACH_VM
        kt = mach_vm_map(mach_task_self(), &target_address, entry_length, 0x0, VM_FLAGS_FIXED|VM_FLAGS_OVERWRITE, mem_handle, 0x0, TRUE, VM_PROT_READ, VM_PROT_READ, VM_INHERIT_COPY);
#else
        kt = vm_map(mach_task_self(), &target_address, entry_length, 0x0, VM_FLAGS_FIXED|VM_FLAGS_OVERWRITE, mem_handle, 0x0, TRUE, VM_PROT_READ, VM_PROT_READ, VM_INHERIT_COPY);
#endif /* !PL_HAVE_MACH_VM */
        
        if (kt != KERN_SUCCESS) {
            PLCF_DEBUG("vm_map() failure: %d", kt);

            /* Clean up the reserved pages */
            kt = vm_deallocate(mach_task_self(), mapping_addr, total_size);
            if (kt != KERN_SUCCESS) {
                PLCF_DEBUG("vm_deallocate() failed: %d", kt);
            }

            /* Drop the memory handle */
            kt = mach_port_mod_refs(mach_task_self(), mem_handle, MACH_PORT_RIGHT_SEND, -1);
            if (kt != KERN_SUCCESS) {
                PLCF_DEBUG("mach_port_mod_refs(-1) failed: %d", kt);
            }
            
            return PLCRASH_ENOMEM;
        }

        /* Drop the memory handle */
        kt = mach_port_mod_refs(mach_task_self(), mem_handle, MACH_PORT_RIGHT_SEND, -1);
        if (kt != KERN_SUCCESS) {
            PLCF_DEBUG("mach_port_mod_refs(-1) failed: %d", kt);
        }
        
        /* Adjust the total mapping size */
        mapped_size += entry_length;
    }
    
    *result = mapping_addr;
    *result_length = mapped_size;

    return PLCRASH_ESUCCESS;
}
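Since the workaround mapper is static, callers live in the same translation unit. Below is a minimal usage sketch; the helper name read_remote_word and its arguments are illustrative placeholders, not PLCrashReporter's actual call site.

/* Sketch: map one word from a target task read-only, copy it out, and release
 * the mapping again. task and target_addr are supplied by a hypothetical caller. */
static plcrash_error_t read_remote_word (mach_port_t task, pl_vm_address_t target_addr, uint32_t *value) {
    pl_vm_address_t mapped_addr;
    pl_vm_size_t mapped_length;

    plcrash_error_t err = plcrash_async_mobject_remap_pages_workaround(task, target_addr, sizeof(uint32_t),
                                                                       true /* require_full */,
                                                                       &mapped_addr, &mapped_length);
    if (err != PLCRASH_ESUCCESS)
        return err;

    /* The mapping starts at the page containing target_addr; re-apply the in-page offset. */
    *value = *(const uint32_t *) (uintptr_t) (mapped_addr + (target_addr - mach_vm_trunc_page(target_addr)));

    /* Drop the mapped pages once the data has been copied out. */
    vm_deallocate(mach_task_self(), mapped_addr, mapped_length);
    return PLCRASH_ESUCCESS;
}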
Example #10
static
load_return_t
load_segment_64(
    struct segment_command_64	*scp64,
    void *				pager,
    off_t				pager_offset,
    off_t				macho_size,
    __unused off_t			end_of_file,
    vm_map_t			map,
    load_result_t		*result
)
{
    kern_return_t		ret;
    mach_vm_offset_t	map_addr, map_offset;
    mach_vm_size_t		map_size, seg_size, delta_size;
    vm_prot_t 		initprot;
    vm_prot_t		maxprot;

    /*
     * Make sure what we get from the file is really ours (as specified
     * by macho_size).
     */
    if (scp64->fileoff + scp64->filesize > (uint64_t)macho_size)
        return (LOAD_BADMACHO);
    /*
     * Make sure the segment is page-aligned in the file.
     */
    if ((scp64->fileoff & PAGE_MASK_64) != 0)
        return LOAD_BADMACHO;

    seg_size = round_page_64(scp64->vmsize);
    if (seg_size == 0)
        return(KERN_SUCCESS);

    /*
     *	Round sizes to page size.
     */
    map_size = round_page_64(scp64->filesize);	/* limited to 32 bits */
    map_addr = round_page_64(scp64->vmaddr);

    if (map_addr == 0 &&
            map_size == 0 &&
            seg_size != 0 &&
            (scp64->initprot & VM_PROT_ALL) == VM_PROT_NONE &&
            (scp64->maxprot & VM_PROT_ALL) == VM_PROT_NONE) {
        /*
         * This is a "page zero" segment:  it starts at address 0,
         * is not mapped from the binary file and is not accessible.
         * User-space should never be able to access that memory, so
         * make it completely off limits by raising the VM map's
         * minimum offset.
         */
        ret = vm_map_raise_min_offset(map, seg_size);
        if (ret != KERN_SUCCESS) {
            return LOAD_FAILURE;
        }
        return LOAD_SUCCESS;
    }

    map_offset = pager_offset + scp64->fileoff;	/* limited to 32 bits */

    if (map_size > 0) {
        initprot = (scp64->initprot) & VM_PROT_ALL;
        maxprot = (scp64->maxprot) & VM_PROT_ALL;
        /*
         *	Map a copy of the file into the address space.
         */
        ret = mach_vm_map(map,
                          &map_addr, map_size, (mach_vm_offset_t)0,
                          VM_FLAGS_FIXED,	pager, map_offset, TRUE,
                          initprot, maxprot,
                          VM_INHERIT_DEFAULT);
        if (ret != KERN_SUCCESS)
            return(LOAD_NOSPACE);

        /*
         *	If the file didn't end on a page boundary,
         *	we need to zero the leftover.
         */
        delta_size = map_size - scp64->filesize;
#if FIXME
        if (delta_size > 0) {
            mach_vm_offset_t	tmp;

            ret = vm_allocate(kernel_map, &tmp, delta_size, VM_FLAGS_ANYWHERE);
            if (ret != KERN_SUCCESS)
                return(LOAD_RESOURCE);

            if (copyout(tmp, map_addr + scp64->filesize,
                        delta_size)) {
                (void) vm_deallocate(
                    kernel_map, tmp, delta_size);
                return (LOAD_FAILURE);
            }

            (void) vm_deallocate(kernel_map, tmp, delta_size);
        }
#endif /* FIXME */
    }

    /*
     *	If the virtual size of the segment is greater
     *	than the size from the file, we need to allocate
     *	zero fill memory for the rest.
     */
    delta_size = seg_size - map_size;
    if (delta_size > 0) {
        mach_vm_offset_t tmp = map_addr + map_size;

        ret = mach_vm_map(map, &tmp, delta_size, 0, VM_FLAGS_FIXED,
                          NULL, 0, FALSE,
                          scp64->initprot, scp64->maxprot,
                          VM_INHERIT_DEFAULT);
        if (ret != KERN_SUCCESS)
            return(LOAD_NOSPACE);
    }

    if ( (scp64->fileoff == 0) && (scp64->filesize != 0) )
        result->mach_header = map_addr;

    if (scp64->flags & SG_PROTECTED_VERSION_1) {
        ret = unprotect_segment_64(scp64->fileoff,
                                   scp64->filesize,
                                   map,
                                   map_addr,
                                   map_size);
    } else {
        ret = LOAD_SUCCESS;
    }

    return ret;
}