Example #1
/*
 * Write bytes to task address space for debugger.
 */
void
db_write_bytes(
	vm_offset_t	addr,
	int		size,
	char		*data,
	task_t		task)
{
	int		n,max;
	addr64_t	phys_dst;
	addr64_t 	phys_src;
	pmap_t	pmap;
	
	while (size > 0) {

		phys_src = db_vtophys(kernel_pmap, (vm_offset_t)data); 
		if (phys_src == 0) {
			db_printf("\nno memory is assigned to src address %08x\n",
				  data);
			db_error(0);
			/* NOTREACHED */
		}
		
		/* space stays as kernel space unless in another task */
		if (task == NULL) pmap = kernel_pmap;
		else pmap = task->map->pmap;

		phys_dst = db_vtophys(pmap, (vm_offset_t)addr);  
		if (phys_dst == 0) {
			db_printf("\nno memory is assigned to dst address %08x\n",
				  addr);
			db_error(0);
			/* NOTREACHED */
		}

		/* don't over-run any page boundaries - check src range */
		max = round_page_64(phys_src + 1) - phys_src;
		if (max > size)
			max = size;
		/* Check destination won't run over boundary either */
		n = round_page_64(phys_dst + 1) - phys_dst;
		if (n < max)
			max = n;
		size -= max;
		addr += max;
		data += max;	/* advance the source pointer too, otherwise the
				 * next pass re-translates the same source page */
		phys_copy(phys_src, phys_dst, max);

		/* resync I+D caches */
		sync_cache64(phys_dst, max);

		phys_src += max;
		phys_dst += max;
	}
}
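
A note on the chunking arithmetic above: round_page_64(p + 1) - p is the number of bytes from p up to the next page boundary, and the "+ 1" makes a page-aligned p yield a full page rather than zero bytes. A minimal standalone sketch, assuming 4 KiB pages and illustrative stand-in macros (not the kernel's definitions):

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the kernel macros, assuming 4 KiB pages. */
#define PAGE_MASK_64     4095ULL
#define round_page_64(x) (((uint64_t)(x) + PAGE_MASK_64) & ~PAGE_MASK_64)

/* Bytes from p up to the next page boundary.  The "+ 1" makes a
 * page-aligned p count as a full page rather than zero bytes. */
static uint64_t bytes_left_in_page(uint64_t p)
{
	return round_page_64(p + 1) - p;
}

int main(void)
{
	printf("%llu\n", (unsigned long long)bytes_left_in_page(0x1000)); /* 4096 */
	printf("%llu\n", (unsigned long long)bytes_left_in_page(0x1ff0)); /* 16 */
	return 0;
}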
Example #2
kern_return_t IOUnmapPages(vm_map_t map, mach_vm_address_t va, mach_vm_size_t length)
{
    pmap_t	pmap = map->pmap;

    pmap_remove(pmap, trunc_page_64(va), round_page_64(va + length));

    return( KERN_SUCCESS );
}
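
The trunc_page_64/round_page_64 pairing above widens the byte range [va, va + length) outward to whole pages, so pmap_remove always operates on complete pages. A minimal sketch of that arithmetic, assuming 4 KiB pages and illustrative stand-in macros:

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins, assuming 4 KiB pages. */
#define PAGE_MASK_64     4095ULL
#define trunc_page_64(x) ((uint64_t)(x) & ~PAGE_MASK_64)
#define round_page_64(x) (((uint64_t)(x) + PAGE_MASK_64) & ~PAGE_MASK_64)

int main(void)
{
	uint64_t va = 0x1234, length = 0x10;
	/* [0x1234, 0x1244) widens to the full page range [0x1000, 0x2000). */
	printf("[0x%llx, 0x%llx)\n",
	       (unsigned long long)trunc_page_64(va),
	       (unsigned long long)round_page_64(va + length));
	return 0;
}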
Example #3
File: wait_queue.c  Project: Prajna/xnu
static uint32_t
compute_wait_hash_size(__unused unsigned cpu_count, __unused uint64_t memsize) {
	uint32_t hsize = (uint32_t)round_page_64((thread_max / 11) * sizeof(struct wait_queue));
	uint32_t bhsize;
	
	if (PE_parse_boot_argn("wqsize", &bhsize, sizeof(bhsize)))
		hsize = bhsize;

	return hsize;
}
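
Here round_page_64 rounds a computed table size up to whole pages so the allocation wastes no partial page; the wqsize boot-arg, when present, overrides the heuristic. A worked example of the sizing with hypothetical numbers (the thread count and entry size below are made up for illustration):

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in, assuming 4 KiB pages. */
#define PAGE_MASK_64     4095ULL
#define round_page_64(x) (((uint64_t)(x) + PAGE_MASK_64) & ~PAGE_MASK_64)

int main(void)
{
	/* Hypothetical values: 2560 max threads, 24-byte wait queue heads. */
	uint64_t thread_max = 2560, entry_size = 24;
	/* (2560 / 11) * 24 = 5568 bytes, rounded up to two pages = 8192. */
	uint64_t hsize = round_page_64((thread_max / 11) * entry_size);
	printf("%llu bytes\n", (unsigned long long)hsize);
	return 0;
}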
Example #4
kern_return_t IOMapPages(vm_map_t map, mach_vm_address_t va, mach_vm_address_t pa,
			mach_vm_size_t length, unsigned int options)
{
    vm_prot_t	 prot;
    unsigned int flags;
    ppnum_t	 pagenum;
    pmap_t 	 pmap = map->pmap;

    prot = (options & kIOMapReadOnly)
		? VM_PROT_READ : (VM_PROT_READ|VM_PROT_WRITE);

    pagenum = (ppnum_t)atop_64(pa);

    switch(options & kIOMapCacheMask ) {			/* What cache mode do we need? */

	case kIOMapDefaultCache:
	default:
	    flags = IODefaultCacheBits(pa);
	    break;

	case kIOMapInhibitCache:
	    flags = VM_WIMG_IO;
	    break;

	case kIOMapWriteThruCache:
	    flags = VM_WIMG_WTHRU;
	    break;

	case kIOMapWriteCombineCache:
	    flags = VM_WIMG_WCOMB;
	    break;

	case kIOMapCopybackCache:
	    flags = VM_WIMG_COPYBACK;
	    break;
	case kIOMapCopybackInnerCache:
	    flags = VM_WIMG_INNERWBACK;
	    break;
    }

    pmap_set_cache_attributes(pagenum, flags);

    vm_map_set_cache_attr(map, (vm_map_offset_t)va);

    // Set up a block mapped area
    pmap_map_block(pmap, va, pagenum, (uint32_t) atop_64(round_page_64(length)), prot, 0, 0);

    return( KERN_SUCCESS );
}
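
In the pmap_map_block call above, atop_64(round_page_64(length)) converts a byte length into a whole-page count: round the length up to page granularity, then shift it down to pages. A minimal sketch, assuming 4 KiB pages and illustrative stand-in macros:

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins, assuming 4 KiB pages (a page shift of 12). */
#define PAGE_SHIFT       12
#define PAGE_MASK_64     ((1ULL << PAGE_SHIFT) - 1)
#define round_page_64(x) (((uint64_t)(x) + PAGE_MASK_64) & ~PAGE_MASK_64)
#define atop_64(x)       ((uint64_t)(x) >> PAGE_SHIFT)

int main(void)
{
	/* A 5000-byte mapping spans two 4 KiB pages. */
	printf("%u pages\n", (unsigned)atop_64(round_page_64(5000)));
	return 0;
}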
Example #5
static
load_return_t
load_segment(
	struct load_command		*lcp,
	uint32_t			filetype,
	void *				control,
	off_t				pager_offset,
	off_t				macho_size,
	struct vnode			*vp,
	vm_map_t			map,
	int64_t				slide,
	load_result_t		*result
)
{
	struct segment_command_64 segment_command, *scp;
	kern_return_t		ret;
	vm_map_offset_t		map_addr, map_offset;
	vm_map_size_t		map_size, seg_size, delta_size;
	vm_prot_t 		initprot;
	vm_prot_t		maxprot;
	size_t			segment_command_size, total_section_size,
				single_section_size;
	boolean_t		prohibit_pagezero_mapping = FALSE;
	
	if (LC_SEGMENT_64 == lcp->cmd) {
		segment_command_size = sizeof(struct segment_command_64);
		single_section_size  = sizeof(struct section_64);
	} else {
		segment_command_size = sizeof(struct segment_command);
		single_section_size  = sizeof(struct section);
	}
	if (lcp->cmdsize < segment_command_size)
		return (LOAD_BADMACHO);
	total_section_size = lcp->cmdsize - segment_command_size;

	if (LC_SEGMENT_64 == lcp->cmd)
		scp = (struct segment_command_64 *)lcp;
	else {
		scp = &segment_command;
		widen_segment_command((struct segment_command *)lcp, scp);
	}

	/*
	 * Make sure what we get from the file is really ours (as specified
	 * by macho_size).
	 */
	if (scp->fileoff + scp->filesize < scp->fileoff ||
	    scp->fileoff + scp->filesize > (uint64_t)macho_size)
		return (LOAD_BADMACHO);
	/*
	 * Ensure that the number of sections specified would fit
	 * within the load command size.
	 */
	if (total_section_size / single_section_size < scp->nsects)
		return (LOAD_BADMACHO);
	/*
	 * Make sure the segment is page-aligned in the file.
	 */
	if ((scp->fileoff & PAGE_MASK_64) != 0)
		return (LOAD_BADMACHO);

	/*
	 *	Round sizes to page size.
	 */
	seg_size = round_page_64(scp->vmsize);
	map_size = round_page_64(scp->filesize);
	map_addr = trunc_page_64(scp->vmaddr); /* JVXXX note that in XNU TOT this is round instead of trunc for 64 bits */
	if (seg_size == 0)
		return (LOAD_SUCCESS);
	if (map_addr == 0 &&
	    map_size == 0 &&
	    seg_size != 0 &&
	    (scp->initprot & VM_PROT_ALL) == VM_PROT_NONE &&
	    (scp->maxprot & VM_PROT_ALL) == VM_PROT_NONE) {
		/*
		 * For PIE, extend page zero rather than moving it.  Extending
		 * page zero keeps early allocations from falling predictably
		 * between the end of page zero and the beginning of the first
		 * slid segment.
		 */
		seg_size += slide;
		slide = 0;
#if CONFIG_EMBEDDED
		prohibit_pagezero_mapping = TRUE;
#endif
		/* XXX (4596982) this interferes with Rosetta, so limit to 64-bit tasks */
		if (scp->cmd == LC_SEGMENT_64) {
		        prohibit_pagezero_mapping = TRUE;
		}
		
		if (prohibit_pagezero_mapping) {
			/*
			 * This is a "page zero" segment:  it starts at address 0,
			 * is not mapped from the binary file and is not accessible.
			 * User-space should never be able to access that memory, so
			 * make it completely off limits by raising the VM map's
			 * minimum offset.
			 */
			ret = vm_map_raise_min_offset(map, seg_size);
			if (ret != KERN_SUCCESS) {
				return (LOAD_FAILURE);
			}
			return (LOAD_SUCCESS);
		}
	}

	/* If a non-zero slide was specified by the caller, apply now */
	map_addr += slide;

	if (map_addr < result->min_vm_addr)
		result->min_vm_addr = map_addr;
	if (map_addr+seg_size > result->max_vm_addr)
		result->max_vm_addr = map_addr+seg_size;

	if (map == VM_MAP_NULL)
		return (LOAD_SUCCESS);

	map_offset = pager_offset + scp->fileoff;	/* limited to 32 bits */

	if (map_size > 0) {
		initprot = (scp->initprot) & VM_PROT_ALL;
		maxprot = (scp->maxprot) & VM_PROT_ALL;
		/*
		 *	Map a copy of the file into the address space.
		 */
		ret = vm_map_enter_mem_object_control(map,
				&map_addr, map_size, (mach_vm_offset_t)0,
			        VM_FLAGS_FIXED,	control, map_offset, TRUE,
				initprot, maxprot,
				VM_INHERIT_DEFAULT);
		if (ret != KERN_SUCCESS)
			return (LOAD_NOSPACE);
	
		/*
		 *	If the file didn't end on a page boundary,
		 *	we need to zero the leftover.
		 */
		delta_size = map_size - scp->filesize;
#if FIXME
		if (delta_size > 0) {
			mach_vm_offset_t	tmp;
	
			ret = mach_vm_allocate(kernel_map, &tmp, delta_size, VM_FLAGS_ANYWHERE);
			if (ret != KERN_SUCCESS)
				return(LOAD_RESOURCE);
	
			if (copyout(tmp, map_addr + scp->filesize,
								delta_size)) {
				(void) mach_vm_deallocate(
						kernel_map, tmp, delta_size);
				return (LOAD_FAILURE);
			}
	
			(void) mach_vm_deallocate(kernel_map, tmp, delta_size);
		}
#endif /* FIXME */
	}

	/*
	 *	If the virtual size of the segment is greater
	 *	than the size from the file, we need to allocate
	 *	zero fill memory for the rest.
	 */
	delta_size = seg_size - map_size;
	if (delta_size > 0) {
		mach_vm_offset_t tmp = map_addr + map_size;

		ret = mach_vm_map(map, &tmp, delta_size, 0, VM_FLAGS_FIXED,
				  NULL, 0, FALSE,
				  scp->initprot, scp->maxprot,
				  VM_INHERIT_DEFAULT);
		if (ret != KERN_SUCCESS)
			return(LOAD_NOSPACE);
	}

	if ( (scp->fileoff == 0) && (scp->filesize != 0) )
		result->mach_header = map_addr;

	if (scp->flags & SG_PROTECTED_VERSION_1) {
		ret = unprotect_segment(scp->fileoff,
					scp->filesize,
					vp,
					pager_offset,
					map,
					map_addr,
					map_size);
	} else {
		ret = LOAD_SUCCESS;
	}
	if (LOAD_SUCCESS == ret && filetype == MH_DYLINKER &&
	    result->all_image_info_addr == MACH_VM_MIN_ADDRESS)
		note_all_image_info_section(scp,
		    LC_SEGMENT_64 == lcp->cmd, single_section_size,
		    (const char *)lcp + segment_command_size, slide, result);

	if ((result->entry_point >= map_addr) && (result->entry_point < (map_addr + map_size)))
		result->validentry = 1;

	return ret;
}
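
The two delta_size computations above split the segment into a file-backed part and a zero-fill tail: map_size covers the page-rounded file contents, and seg_size - map_size is mapped as anonymous zero-fill memory (for example, a __DATA segment whose vmsize exceeds its filesize). A worked example with hypothetical sizes, assuming 4 KiB pages:

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in, assuming 4 KiB pages. */
#define PAGE_MASK_64     4095ULL
#define round_page_64(x) (((uint64_t)(x) + PAGE_MASK_64) & ~PAGE_MASK_64)

int main(void)
{
	/* Hypothetical segment: 0x2100 bytes in the file, 0x5000 in memory. */
	uint64_t filesize = 0x2100, vmsize = 0x5000;
	uint64_t map_size = round_page_64(filesize);	/* 0x3000 file-backed */
	uint64_t seg_size = round_page_64(vmsize);	/* 0x5000 total       */
	/* The remaining 0x2000 bytes get anonymous zero-fill pages. */
	printf("zero-fill: 0x%llx bytes\n",
	       (unsigned long long)(seg_size - map_size));
	return 0;
}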
Example #6
static
load_return_t
load_segment_64(
    struct segment_command_64	*scp64,
    void *				pager,
    off_t				pager_offset,
    off_t				macho_size,
    __unused off_t			end_of_file,
    vm_map_t			map,
    load_result_t		*result
)
{
    kern_return_t		ret;
    mach_vm_offset_t	map_addr, map_offset;
    mach_vm_size_t		map_size, seg_size, delta_size;
    vm_prot_t 		initprot;
    vm_prot_t		maxprot;

    /*
     * Make sure what we get from the file is really ours (as specified
     * by macho_size).
     */
    if (scp64->fileoff + scp64->filesize < scp64->fileoff ||
        scp64->fileoff + scp64->filesize > (uint64_t)macho_size)
        return (LOAD_BADMACHO);
    /*
     * Make sure the segment is page-aligned in the file.
     */
    if ((scp64->fileoff & PAGE_MASK_64) != 0)
        return LOAD_BADMACHO;

    seg_size = round_page_64(scp64->vmsize);
    if (seg_size == 0)
        return (LOAD_SUCCESS);

    /*
     *	Round sizes to page size.
     */
    map_size = round_page_64(scp64->filesize);	/* limited to 32 bits */
    map_addr = round_page_64(scp64->vmaddr);

    if (map_addr == 0 &&
            map_size == 0 &&
            seg_size != 0 &&
            (scp64->initprot & VM_PROT_ALL) == VM_PROT_NONE &&
            (scp64->maxprot & VM_PROT_ALL) == VM_PROT_NONE) {
        /*
         * This is a "page zero" segment:  it starts at address 0,
         * is not mapped from the binary file and is not accessible.
         * User-space should never be able to access that memory, so
         * make it completely off limits by raising the VM map's
         * minimum offset.
         */
        ret = vm_map_raise_min_offset(map, seg_size);
        if (ret != KERN_SUCCESS) {
            return LOAD_FAILURE;
        }
        return LOAD_SUCCESS;
    }

    map_offset = pager_offset + scp64->fileoff;	/* limited to 32 bits */

    if (map_size > 0) {
        initprot = (scp64->initprot) & VM_PROT_ALL;
        maxprot = (scp64->maxprot) & VM_PROT_ALL;
        /*
         *	Map a copy of the file into the address space.
         */
        ret = mach_vm_map(map,
                          &map_addr, map_size, (mach_vm_offset_t)0,
                          VM_FLAGS_FIXED,	pager, map_offset, TRUE,
                          initprot, maxprot,
                          VM_INHERIT_DEFAULT);
        if (ret != KERN_SUCCESS)
            return(LOAD_NOSPACE);

        /*
         *	If the file didn't end on a page boundary,
         *	we need to zero the leftover.
         */
        delta_size = map_size - scp64->filesize;
#if FIXME
        if (delta_size > 0) {
            mach_vm_offset_t	tmp;

            ret = mach_vm_allocate(kernel_map, &tmp, delta_size, VM_FLAGS_ANYWHERE);
            if (ret != KERN_SUCCESS)
                return(LOAD_RESOURCE);

            if (copyout(tmp, map_addr + scp64->filesize,
                        delta_size)) {
                (void) mach_vm_deallocate(
                    kernel_map, tmp, delta_size);
                return (LOAD_FAILURE);
            }

            (void) mach_vm_deallocate(kernel_map, tmp, delta_size);
        }
#endif /* FIXME */
    }

    /*
     *	If the virtual size of the segment is greater
     *	than the size from the file, we need to allocate
     *	zero fill memory for the rest.
     */
    delta_size = seg_size - map_size;
    if (delta_size > 0) {
        mach_vm_offset_t tmp = map_addr + map_size;

        ret = mach_vm_map(map, &tmp, delta_size, 0, VM_FLAGS_FIXED,
                          NULL, 0, FALSE,
                          scp64->initprot, scp64->maxprot,
                          VM_INHERIT_DEFAULT);
        if (ret != KERN_SUCCESS)
            return(LOAD_NOSPACE);
    }

    if ( (scp64->fileoff == 0) && (scp64->filesize != 0) )
        result->mach_header = map_addr;

    if (scp64->flags & SG_PROTECTED_VERSION_1) {
        ret = unprotect_segment_64(scp64->fileoff,
                                   scp64->filesize,
                                   map,
                                   map_addr,
                                   map_size);
    } else {
        ret = LOAD_SUCCESS;
    }

    return ret;
}