Example #1
/*
 * Translate a physical address to an allocated virtual address
 * args:   Physical address
 * return: 0 if successful. The page descriptor and page mapping values are
 *         filled. IT IS THE CALLER'S RESPONSIBILITY to call unxlate_pa_va when
 *         done with it.
 */
static int xlate_pa_va(addr64_t phys, IOMemoryDescriptor **page_desc,
                        IOMemoryMap **page_map)
{
    // Truncate to the start of the page containing the address
    addr64_t page = trunc_page_64(phys);

    *page_desc = IOMemoryDescriptor::withPhysicalAddress(page, PAGE_SIZE,
                                                         kIODirectionInOut);
    if (*page_desc == NULL) {
        pmem_error("Can't read from %#016llx, address not in physical memory range",
                   phys);
        // Skip this range as it is not even in the physical address space
        return -1;
    } else {
        // Map the page containing address into kernel address space.
        *page_map = (*page_desc)->createMappingInTask(kernel_task, 0,
                                                      kIODirectionInOut, 0, 0);
        // Check if the mapping succeeded.
        if (!*page_map) {
            pmem_error("page %#016llx could not be mapped into the kernel, "
                       "zero padding return buffer", page);
            return -1;
        }
    }
    return 0;
}
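
The contract above leaves cleanup to the caller via unxlate_pa_va, which isn't shown on this page. A minimal sketch of what that counterpart would have to do, assuming it only releases the map and descriptor created by xlate_pa_va (the driver's actual implementation may differ):

static void unxlate_pa_va(IOMemoryDescriptor **page_desc, IOMemoryMap **page_map)
{
    // Release in reverse order of creation: the mapping first, then the
    // descriptor that backs it.
    if (*page_map) {
        (*page_map)->release();
        *page_map = NULL;
    }
    if (*page_desc) {
        (*page_desc)->release();
        *page_desc = NULL;
    }
}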
Example #2
kern_return_t IOUnmapPages(vm_map_t map, mach_vm_address_t va, mach_vm_size_t length)
{
    pmap_t	pmap = map->pmap;

    pmap_remove(pmap, trunc_page_64(va), round_page_64(va + length));

    return( KERN_SUCCESS );
}
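
IOUnmapPages widens the request to whole pages: the start address is truncated down and the end rounded up, so any partially covered page is removed in full. A standalone sketch of that arithmetic, assuming 4 KiB pages and macro definitions equivalent to XNU's:

#include <stdio.h>

/* Assumed 4 KiB pages; these mirror the XNU trunc/round macros. */
#define PAGE_MASK_64     0xFFFULL
#define trunc_page_64(x) ((unsigned long long)(x) & ~PAGE_MASK_64)
#define round_page_64(x) (((unsigned long long)(x) + PAGE_MASK_64) & ~PAGE_MASK_64)

int main(void)
{
    unsigned long long va = 0x7f0000001234ULL, length = 0x2000ULL;
    /* The range handed to pmap_remove covers every touched page: */
    printf("start %#llx end %#llx\n",
           trunc_page_64(va),            /* 0x7f0000001000 */
           round_page_64(va + length));  /* 0x7f0000004000 */
    return 0;
}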
Example #3
// Copy the requested amount to userspace if it doesn't cross page boundaries
// or memory-mapped I/O. If it does, stop at the boundary. Copies zeroes
// if the given physical address is not backed by physical memory.
//
// args: uio is the userspace io request object
// return: number of bytes copied successfully
//
static uint64_t pmem_partial_read(struct uio *uio, addr64_t start_addr,
                                  addr64_t end_addr) {
    // Separate page and offset
    uint64_t page_offset = start_addr & PAGE_MASK;
    addr64_t page = trunc_page_64(start_addr);
    // don't copy across page boundaries
    uint32_t chunk_len = (uint32_t)MIN(PAGE_SIZE - page_offset,
                                       end_addr - start_addr);
    // Prepare the page for IOKit
    IOMemoryDescriptor *page_desc =
        IOMemoryDescriptor::withPhysicalAddress(page, PAGE_SIZE, kIODirectionIn);
    if (page_desc == NULL) {
        pmem_error("Can't read from %#016llx, address not in physical memory range",
                   start_addr);
        // Zero pad this chunk so the uio still advances past the hole, as
        // the header comment promises.
        uiomove64((addr64_t)pmem_zero_page + page_offset,
                  (uint32_t)chunk_len, uio);
        return chunk_len;
    } else {
        // Map the page containing address into kernel address space.
        IOMemoryMap *page_map =
            page_desc->createMappingInTask(kernel_task, 0, kIODirectionIn, 0, 0);
        // Check if the mapping succeeded.
        if (!page_map) {
            pmem_error("page %#016llx could not be mapped into the kernel, "
                       "zero padding return buffer", page);
            // Zero pad this chunk, as it is not inside a valid page frame.
            uiomove64((addr64_t)pmem_zero_page + page_offset,
                      (uint32_t)chunk_len, uio);
        } else {
            // Successfully mapped page, copy contents...
            pmem_log("partial_read");
            log_addr(page_map->getAddress(), 64, "page_map->getAddress()");
            log_addr(page_offset, 64, "page_offset");
            uiomove64(page_map->getAddress() + page_offset, (uint32_t)chunk_len, uio);
            page_map->release();
        }
        page_desc->release();
    }
    return chunk_len;
}
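
Because pmem_partial_read stops at every page boundary, the device's read handler has to call it in a loop until the request is drained. A hypothetical caller sketch, assuming the BSD uio accessors uio_offset() and uio_resid() track progress (the actual pmem entry point may look different):

static int pmem_read(struct uio *uio)
{
    while (uio_resid(uio) > 0) {
        addr64_t start = uio_offset(uio);
        addr64_t end = start + uio_resid(uio);
        // pmem_partial_read advances the uio as it copies, so each
        // iteration resumes exactly at the next page boundary.
        if (pmem_partial_read(uio, start, end) == 0) {
            break;  // No forward progress; avoid spinning forever.
        }
    }
    return 0;
}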
Example #4
File: pmem.cpp Project: KarlVogel/rekall
// Copy the requested amount to userspace if it doesn't cross page boundaries
// or memory-mapped I/O. If it does, stop at the boundary. Copies zeroes
// if the given physical address is not backed by physical memory.
//
// args: uio is the userspace io request object
// return: number of bytes copied successfully
//
static uint64_t pmem_partial_read(struct uio *uio, addr64_t start_addr,
                                  addr64_t end_addr) {
  void *vaddr_page = NULL;
  // Separate page and offset
  uint64_t page_offset = start_addr & PAGE_MASK;
  addr64_t page = trunc_page_64(start_addr);
  // don't copy across page boundaries
  uint32_t chunk_len = (uint32_t)MIN(PAGE_SIZE - page_offset,
                                     end_addr - start_addr);
  if (pmem_map_physical_page(page, &vaddr_page) != KERN_SUCCESS) {
    pmem_error("page %#016llx could not be mapped into the kernel, "
               "zero padding return buffer", page);
    // Zero pad this chunk, as it is not inside a valid page frame.
    uiomove64((addr64_t)pmem_zero_page + page_offset,
              (uint32_t)chunk_len, uio);
  } else {
    // Successfully mapped page, copy contents...
    uiomove64((reinterpret_cast<uint64_t>(vaddr_page) + page_offset),
              (uint32_t)chunk_len, uio);
  }

  return chunk_len;
}
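
Example #4 factors the IOKit mapping out of Example #3 into pmem_map_physical_page. One way such a helper could be assembled from the same descriptor/map pattern; a sketch only, since the project's real helper must also retain the IOMemoryMap and IOMemoryDescriptor somewhere so the page can be unmapped later:

static kern_return_t pmem_map_physical_page(addr64_t page, void **vaddr_page)
{
    IOMemoryDescriptor *desc =
        IOMemoryDescriptor::withPhysicalAddress(page, PAGE_SIZE, kIODirectionIn);
    if (desc == NULL) {
        return KERN_FAILURE;
    }
    IOMemoryMap *map =
        desc->createMappingInTask(kernel_task, 0, kIODirectionIn, 0, 0);
    if (map == NULL) {
        desc->release();
        return KERN_FAILURE;
    }
    // Bookkeeping for the eventual release of map and desc is omitted here.
    *vaddr_page = reinterpret_cast<void *>(map->getAddress());
    return KERN_SUCCESS;
}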
Example #5
File: mach_loader.c Project: CptFrazz/xnu
static
load_return_t
load_segment(
	struct load_command		*lcp,
	uint32_t			filetype,
	void *				control,
	off_t				pager_offset,
	off_t				macho_size,
	struct vnode			*vp,
	vm_map_t			map,
	int64_t				slide,
	load_result_t		*result
)
{
	struct segment_command_64 segment_command, *scp;
	kern_return_t		ret;
	vm_map_offset_t		map_addr, map_offset;
	vm_map_size_t		map_size, seg_size, delta_size;
	vm_prot_t 		initprot;
	vm_prot_t		maxprot;
	size_t			segment_command_size, total_section_size,
				single_section_size;
	boolean_t		prohibit_pagezero_mapping = FALSE;
	
	if (LC_SEGMENT_64 == lcp->cmd) {
		segment_command_size = sizeof(struct segment_command_64);
		single_section_size  = sizeof(struct section_64);
	} else {
		segment_command_size = sizeof(struct segment_command);
		single_section_size  = sizeof(struct section);
	}
	if (lcp->cmdsize < segment_command_size)
		return (LOAD_BADMACHO);
	total_section_size = lcp->cmdsize - segment_command_size;

	if (LC_SEGMENT_64 == lcp->cmd)
		scp = (struct segment_command_64 *)lcp;
	else {
		scp = &segment_command;
		widen_segment_command((struct segment_command *)lcp, scp);
	}

	/*
	 * Make sure what we get from the file is really ours (as specified
	 * by macho_size).
	 */
	if (scp->fileoff + scp->filesize < scp->fileoff ||
	    scp->fileoff + scp->filesize > (uint64_t)macho_size)
		return (LOAD_BADMACHO);
	/*
	 * Ensure that the number of sections specified would fit
	 * within the load command size.
	 */
	if (total_section_size / single_section_size < scp->nsects)
		return (LOAD_BADMACHO);
	/*
	 * Make sure the segment is page-aligned in the file.
	 */
	if ((scp->fileoff & PAGE_MASK_64) != 0)
		return (LOAD_BADMACHO);

	/*
	 *	Round sizes to page size.
	 */
	seg_size = round_page_64(scp->vmsize);
	map_size = round_page_64(scp->filesize);
	map_addr = trunc_page_64(scp->vmaddr); /* JVXXX note that in XNU TOT this is round instead of trunc for 64 bits */
	if (seg_size == 0)
		return (LOAD_SUCCESS);
	if (map_addr == 0 &&
	    map_size == 0 &&
	    seg_size != 0 &&
	    (scp->initprot & VM_PROT_ALL) == VM_PROT_NONE &&
	    (scp->maxprot & VM_PROT_ALL) == VM_PROT_NONE) {
		/*
		 * For PIE, extend page zero rather than moving it.  Extending
		 * page zero keeps early allocations from falling predictably
		 * between the end of page zero and the beginning of the first
		 * slid segment.
		 */
		seg_size += slide;
		slide = 0;
#if CONFIG_EMBEDDED
		prohibit_pagezero_mapping = TRUE;
#endif
		/* XXX (4596982) this interferes with Rosetta, so limit to 64-bit tasks */
		if (scp->cmd == LC_SEGMENT_64) {
			prohibit_pagezero_mapping = TRUE;
		}
		
		if (prohibit_pagezero_mapping) {
			/*
			 * This is a "page zero" segment:  it starts at address 0,
			 * is not mapped from the binary file and is not accessible.
			 * User-space should never be able to access that memory, so
			 * make it completely off limits by raising the VM map's
			 * minimum offset.
			 */
			ret = vm_map_raise_min_offset(map, seg_size);
			if (ret != KERN_SUCCESS) {
				return (LOAD_FAILURE);
			}
			return (LOAD_SUCCESS);
		}
	}

	/* If a non-zero slide was specified by the caller, apply now */
	map_addr += slide;

	if (map_addr < result->min_vm_addr)
		result->min_vm_addr = map_addr;
	if (map_addr+seg_size > result->max_vm_addr)
		result->max_vm_addr = map_addr+seg_size;

	if (map == VM_MAP_NULL)
		return (LOAD_SUCCESS);

	map_offset = pager_offset + scp->fileoff;	/* limited to 32 bits */

	if (map_size > 0) {
		initprot = (scp->initprot) & VM_PROT_ALL;
		maxprot = (scp->maxprot) & VM_PROT_ALL;
		/*
		 *	Map a copy of the file into the address space.
		 */
		ret = vm_map_enter_mem_object_control(map,
				&map_addr, map_size, (mach_vm_offset_t)0,
			        VM_FLAGS_FIXED,	control, map_offset, TRUE,
				initprot, maxprot,
				VM_INHERIT_DEFAULT);
		if (ret != KERN_SUCCESS)
			return (LOAD_NOSPACE);
	
		/*
		 *	If the file didn't end on a page boundary,
		 *	we need to zero the leftover.
		 */
		delta_size = map_size - scp->filesize;
#if FIXME
		if (delta_size > 0) {
			mach_vm_offset_t	tmp;
	
			ret = mach_vm_allocate(kernel_map, &tmp, delta_size, VM_FLAGS_ANYWHERE);
			if (ret != KERN_SUCCESS)
				return(LOAD_RESOURCE);
	
			if (copyout(tmp, map_addr + scp->filesize,
								delta_size)) {
				(void) mach_vm_deallocate(
						kernel_map, tmp, delta_size);
				return (LOAD_FAILURE);
			}
	
			(void) mach_vm_deallocate(kernel_map, tmp, delta_size);
		}
#endif /* FIXME */
	}

	/*
	 *	If the virtual size of the segment is greater
	 *	than the size from the file, we need to allocate
	 *	zero fill memory for the rest.
	 */
	delta_size = seg_size - map_size;
	if (delta_size > 0) {
		mach_vm_offset_t tmp = map_addr + map_size;

		ret = mach_vm_map(map, &tmp, delta_size, 0, VM_FLAGS_FIXED,
				  NULL, 0, FALSE,
				  scp->initprot, scp->maxprot,
				  VM_INHERIT_DEFAULT);
		if (ret != KERN_SUCCESS)
			return(LOAD_NOSPACE);
	}

	if ( (scp->fileoff == 0) && (scp->filesize != 0) )
		result->mach_header = map_addr;

	if (scp->flags & SG_PROTECTED_VERSION_1) {
		ret = unprotect_segment(scp->fileoff,
					scp->filesize,
					vp,
					pager_offset,
					map,
					map_addr,
					map_size);
	} else {
		ret = LOAD_SUCCESS;
	}
	if (LOAD_SUCCESS == ret && filetype == MH_DYLINKER &&
	    result->all_image_info_addr == MACH_VM_MIN_ADDRESS)
		note_all_image_info_section(scp,
		    LC_SEGMENT_64 == lcp->cmd, single_section_size,
		    (const char *)lcp + segment_command_size, slide, result);

	if ((result->entry_point >= map_addr) && (result->entry_point < (map_addr + map_size)))
		result->validentry = 1;

	return ret;
}
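
The page-rounding at the top of load_segment drives everything that follows: vmsize and filesize are rounded up, vmaddr is truncated down, and the gap between the two rounded sizes becomes the anonymous zero-fill mapping near the end. A worked sketch with made-up segment values, assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_MASK_64     0xFFFULL
#define trunc_page_64(x) ((unsigned long long)(x) & ~PAGE_MASK_64)
#define round_page_64(x) (((unsigned long long)(x) + PAGE_MASK_64) & ~PAGE_MASK_64)

int main(void)
{
    /* Hypothetical segment: 0x5200 bytes in the file, 0x9000 in memory. */
    unsigned long long vmaddr = 0x100000123ULL;
    unsigned long long vmsize = 0x9000ULL, filesize = 0x5200ULL;

    unsigned long long seg_size = round_page_64(vmsize);    /* 0x9000 */
    unsigned long long map_size = round_page_64(filesize);  /* 0x6000 */
    unsigned long long map_addr = trunc_page_64(vmaddr);    /* 0x100000000 */

    /* The pages past the file-backed mapping get zero-fill memory: */
    printf("zero fill %#llx bytes at %#llx\n",
           seg_size - map_size, map_addr + map_size);
    return 0;
}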