Example #1
void
swap_pager_swap_init()
{
	swp_clean_t spc;
	struct buf *bp;
	int i;

	/*
	 * kva's are allocated here so that we don't need to keep doing
	 * kmem_alloc pageables at runtime
	 */
	for (i = 0, spc = swcleanlist; i < npendingio; i++, spc++) {
		spc->spc_kva = kmem_alloc_pageable(pager_map, PAGE_SIZE * MAX_PAGEOUT_CLUSTER);
		if (!spc->spc_kva) {
			break;
		}
		spc->spc_bp = malloc(sizeof(*bp), M_TEMP, M_KERNEL);
		if (!spc->spc_bp) {
			kmem_free_wakeup(pager_map, spc->spc_kva, PAGE_SIZE);
			break;
		}
		spc->spc_flags = 0;
		TAILQ_INSERT_TAIL(&swap_pager_free, spc, spc_list);
		swap_pager_free_count++;
	}
}
Example #2
File: pmap.c Project: DJHartley/xnu
/**
 * io_map
 *
 * Maps an IO region and returns its virtual address.
 */
vm_offset_t
io_map(vm_offset_t phys_addr, vm_size_t size, unsigned int flags)
{
	vm_offset_t	start;

	if (kernel_map == VM_MAP_NULL) {
	    /*
	     * VM is not initialized.  Grab memory.
	     */
	    start = virt_begin;
	    virt_begin += round_page(size);

	    (void) pmap_map_bd(start, phys_addr, phys_addr + round_page(size),
			       VM_PROT_READ|VM_PROT_WRITE,
			       flags);
	}
	else {
	    (void) kmem_alloc_pageable(kernel_map, &start, round_page(size));
	    (void) pmap_map(start, phys_addr, phys_addr + round_page(size),
			    VM_PROT_READ|VM_PROT_WRITE,
			    flags);
	}

	return (start);
}
Example #3
static          vm_offset_t
dpt_physmap(u_int32_t req_paddr, vm_size_t req_size)
{
	vm_offset_t     va;
	int             ndx;
	vm_size_t       size;
	u_int32_t       paddr;
	u_int32_t       offset;



	size = (req_size / PAGE_SIZE + 1) * PAGE_SIZE;
	paddr = req_paddr & 0xfffff000;
	offset = req_paddr - paddr;

	va = kmem_alloc_pageable(kernel_map, size);
	if (va == (vm_offset_t) 0)
		return (va);

	for (ndx = 0; ndx < size; ndx += PAGE_SIZE) {
		pmap_kenter(va + ndx, paddr + ndx);
		invltlb();
	}

	return (va + offset);
}
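Examples #2 and #3 already show that two different interfaces share the name kmem_alloc_pageable: the Mach/XNU form returns a kern_return_t and hands the address back through an out-pointer, while the BSD-derived form (also used in example #1) returns the address itself and signals failure with 0. A minimal side-by-side sketch of the two call shapes, assuming a page-rounded size and the usual kernel_map:

	/* Mach/XNU form (example #2): status returned, address via out-pointer. */
	vm_offset_t addr;
	kern_return_t kr;

	kr = kmem_alloc_pageable(kernel_map, &addr, round_page(size));
	if (kr != KERN_SUCCESS)
		return KERN_RESOURCE_SHORTAGE;

	/*
	 * BSD-derived form (examples #1 and #3): address returned, 0 on failure.
	 * DragonFly-derived code later in this list passes &kernel_map instead.
	 */
	vm_offset_t va = kmem_alloc_pageable(kernel_map, round_page(size));
	if (va == 0)
		return ENOMEM;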
Example #4
File: kalloc.c Project: CptFrazz/xnu
void *
OSMalloc(
	uint32_t			size,
	OSMallocTag			tag)
{
	void			*addr=NULL;
	kern_return_t	kr;

	OSMalloc_Tagref(tag);
	if ((tag->OSMT_attr & OSMT_PAGEABLE)
	    && (size & ~PAGE_MASK)) {

		if ((kr = kmem_alloc_pageable(kernel_map, (vm_offset_t *)&addr, size)) != KERN_SUCCESS)
			addr = NULL;
	} else 
		addr = kalloc((vm_size_t)size);

	if (!addr)
		OSMalloc_Tagrele(tag);

	return(addr);
}
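OSMalloc above takes a tag reference before allocating and only drops it when the allocation fails, so a successful pageable allocation keeps the tag referenced until the buffer is released. As a hedged sketch only (not quoted from kalloc.c), the matching release path could mirror the same pageable-versus-kalloc decision, assuming kmem_free and kfree as the counterparts:

void
OSFree_sketch(void *addr, uint32_t size, OSMallocTag tag)	/* illustrative name */
{
	if ((tag->OSMT_attr & OSMT_PAGEABLE)
	    && (size & ~PAGE_MASK))
		kmem_free(kernel_map, (vm_offset_t)addr, size);	/* pageable path */
	else
		kfree(addr, size);				/* kalloc'ed path */

	OSMalloc_Tagrele(tag);	/* drop the reference taken in OSMalloc */
}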
Example #5
/*
 * Allocate and map memory for devices that may need to be mapped before
 * Mach VM is running.
 */
vm_offset_t
io_map(
	vm_offset_t	phys_addr,
	vm_size_t	size)
{
	vm_offset_t	start;

	if (kernel_map == VM_MAP_NULL) {
	    /*
	     * VM is not initialized.  Grab memory.
	     */
	    start = kernel_virtual_start;
	    kernel_virtual_start += round_page(size);
	    printf("stealing kernel virtual addresses %08lx-%08lx\n", start, kernel_virtual_start);
	}
	else {
	    (void) kmem_alloc_pageable(kernel_map, &start, round_page(size));
	}
	(void) pmap_map_bd(start, phys_addr, phys_addr + round_page(size),
			VM_PROT_READ|VM_PROT_WRITE);
	return (start);
}
Example #6
/*
 * vm_contig_pg_kmap:
 *
 * Map previously allocated (vm_contig_pg_alloc) range of pages from
 * vm_page_array[] into the KVA.  Once mapped, the pages are part of
 * the Kernel, and are to be freed with kmem_free(&kernel_map, addr, size).
 *
 * No requirements.
 */
static vm_offset_t
vm_contig_pg_kmap(int start, u_long size, vm_map_t map, int flags)
{
    vm_offset_t addr;
    vm_paddr_t pa;
    vm_page_t pga = vm_page_array;
    u_long offset;

    if (size == 0)
        panic("vm_contig_pg_kmap: size must not be 0");
    size = round_page(size);
    addr = kmem_alloc_pageable(&kernel_map, size);
    if (addr) {
        pa = VM_PAGE_TO_PHYS(&pga[start]);
        for (offset = 0; offset < size; offset += PAGE_SIZE)
            pmap_kenter_quick(addr + offset, pa + offset);
        smp_invltlb();
        if (flags & M_ZERO)
            bzero((void *)addr, size);
    }
    return(addr);
}
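Per the header comment, the page range mapped here was reserved earlier by vm_contig_pg_alloc() and the returned KVA is released with kmem_free(). A caller-side sketch under those assumptions ('start' and 'size' are hypothetical values supplied by that earlier allocation):

	vm_offset_t addr;

	/* Map the reserved pages, zero-filled, into kernel_map KVA. */
	addr = vm_contig_pg_kmap(start, size, &kernel_map, M_ZERO);
	if (addr != 0) {
		/* ... use the mapping ... */
		kmem_free(&kernel_map, addr, round_page(size));
	}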
Example #7
kern_return_t
host_ipc_hash_info(
	host_t					host,
	hash_info_bucket_array_t		*infop,
	mach_msg_type_number_t 		*countp)
{
	vm_map_copy_t copy;
	vm_offset_t addr;
	vm_size_t size;
	hash_info_bucket_t *info;
	natural_t count;
	kern_return_t kr;

	if (host == HOST_NULL)
		return KERN_INVALID_HOST;

	/* start with in-line data */

	count = ipc_hash_size();
	size = round_page(count * sizeof(hash_info_bucket_t));
	kr = kmem_alloc_pageable(ipc_kernel_map, &addr, size);
	if (kr != KERN_SUCCESS)
		return KERN_RESOURCE_SHORTAGE;

	info = (hash_info_bucket_t *) addr;
	count = ipc_hash_info(info, count);

	if (size > count * sizeof(hash_info_bucket_t))
		bzero((char *)&info[count], size - count * sizeof(hash_info_bucket_t));

	kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr, 
			   (vm_map_size_t)size, TRUE, &copy);
	assert(kr == KERN_SUCCESS);

	*infop = (hash_info_bucket_t *) copy;
	*countp = count;
	return KERN_SUCCESS;
}
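Example #7 above, and examples #8, #10, and #11 below, follow the same Mach idiom: allocate a pageable buffer in ipc_kernel_map, fill it, zero the page-rounding slack, then hand the pages to the caller through vm_map_copyin with TRUE so the source range is consumed. A condensed sketch of that idiom; fill_buckets() is a hypothetical stand-in for the real filler (ipc_hash_info, vm_page_info, ...):

static kern_return_t
copy_out_buckets(natural_t count, vm_map_copy_t *copyp, natural_t *countp)
{
	vm_offset_t addr;
	vm_size_t size = round_page(count * sizeof(hash_info_bucket_t));
	kern_return_t kr;

	kr = kmem_alloc_pageable(ipc_kernel_map, &addr, size);
	if (kr != KERN_SUCCESS)
		return KERN_RESOURCE_SHORTAGE;

	count = fill_buckets((hash_info_bucket_t *) addr, count);	/* hypothetical */

	/* Zero the slack between the used entries and the rounded size. */
	if (size > count * sizeof(hash_info_bucket_t))
		bzero((char *)addr + count * sizeof(hash_info_bucket_t),
		      size - count * sizeof(hash_info_bucket_t));

	/* TRUE: the source range is deallocated from ipc_kernel_map. */
	kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr,
			   (vm_map_size_t)size, TRUE, copyp);
	assert(kr == KERN_SUCCESS);

	*countp = count;
	return KERN_SUCCESS;
}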
Example #8
kern_return_t
host_ipc_marequest_info(
	host_t 				host,
	unsigned int 			*maxp,
	hash_info_bucket_array_t 	*infop,
	unsigned int 			*countp)
{
	vm_offset_t addr;
	vm_size_t size = 0; /* '=0' to shut up lint */
	hash_info_bucket_t *info;
	unsigned int potential, actual;
	kern_return_t kr;

	if (host == HOST_NULL)
		return KERN_INVALID_HOST;

	/* start with in-line data */

	info = *infop;
	potential = *countp;

	for (;;) {
		actual = ipc_marequest_info(maxp, info, potential);
		if (actual <= potential)
			break;

		/* allocate more memory */

		if (info != *infop)
			kmem_free(ipc_kernel_map, addr, size);

		size = round_page(actual * sizeof *info);
		kr = kmem_alloc_pageable(ipc_kernel_map, &addr, size);
		if (kr != KERN_SUCCESS)
			return KERN_RESOURCE_SHORTAGE;

		info = (hash_info_bucket_t *) addr;
		potential = size/sizeof *info;
	}

	if (info == *infop) {
		/* data fit in-line; nothing to deallocate */

		*countp = actual;
	} else if (actual == 0) {
		kmem_free(ipc_kernel_map, addr, size);

		*countp = 0;
	} else {
		vm_map_copy_t copy;
		vm_size_t used;

		used = round_page(actual * sizeof *info);

		if (used != size)
			kmem_free(ipc_kernel_map, addr + used, size - used);

		kr = vm_map_copyin(ipc_kernel_map, addr, used,
				   TRUE, &copy);
		assert(kr == KERN_SUCCESS);

		*infop = (hash_info_bucket_t *) copy;
		*countp = actual;
	}

	return KERN_SUCCESS;
}
Example #9
/*
 * p->p_token is held on entry.
 */
static int
procfs_rwmem(struct proc *curp, struct proc *p, struct uio *uio)
{
	int error;
	int writing;
	struct vmspace *vm;
	vm_map_t map;
	vm_offset_t pageno = 0;		/* page number */
	vm_prot_t reqprot;
	vm_offset_t kva;

	/*
	 * if the vmspace is in the midst of being allocated or deallocated,
	 * or the process is exiting, don't try to grab anything.  The
	 * page table usage in that process may be messed up.
	 */
	vm = p->p_vmspace;
	if (p->p_stat == SIDL || p->p_stat == SZOMB)
		return EFAULT;
	if ((p->p_flags & (P_WEXIT | P_INEXEC)) ||
	    sysref_isinactive(&vm->vm_sysref))
		return EFAULT;

	/*
	 * The map we want...
	 */
	vmspace_hold(vm);
	map = &vm->vm_map;

	writing = (uio->uio_rw == UIO_WRITE);
	reqprot = VM_PROT_READ;
	if (writing)
		reqprot |= VM_PROT_WRITE | VM_PROT_OVERRIDE_WRITE;

	kva = kmem_alloc_pageable(&kernel_map, PAGE_SIZE);

	/*
	 * Only map in one page at a time.  We don't have to, but it
	 * makes things easier.  This way is trivial - right?
	 */
	do {
		vm_offset_t uva;
		vm_offset_t page_offset;	/* offset into page */
		size_t len;
		vm_page_t m;

		uva = (vm_offset_t) uio->uio_offset;

		/*
		 * Get the page number of this segment.
		 */
		pageno = trunc_page(uva);
		page_offset = uva - pageno;

		/*
		 * How many bytes to copy
		 */
		len = szmin(PAGE_SIZE - page_offset, uio->uio_resid);

		/*
		 * Fault the page on behalf of the process
		 */
		m = vm_fault_page(map, pageno, reqprot,
				  VM_FAULT_NORMAL, &error);
		if (error) {
			KKASSERT(m == NULL);
			error = EFAULT;
			break;
		}

		/*
		 * Cleanup tmap then create a temporary KVA mapping and
		 * do the I/O.  We can switch between cpus so don't bother
		 * synchronizing across all cores.
		 */
		pmap_kenter_quick(kva, VM_PAGE_TO_PHYS(m));
		error = uiomove((caddr_t)(kva + page_offset), len, uio);
		pmap_kremove_quick(kva);

		/*
		 * release the page and we are done
		 */
		vm_page_unhold(m);
	} while (error == 0 && uio->uio_resid > 0);

	vmspace_drop(vm);
	kmem_free(&kernel_map, kva, PAGE_SIZE);

	return (error);
}
Example #10
kern_return_t
host_virtual_physical_table_info(
	__DEBUG_ONLY host_t			host,
	__DEBUG_ONLY hash_info_bucket_array_t	*infop,
	__DEBUG_ONLY mach_msg_type_number_t 	*countp)
{
#if !MACH_VM_DEBUG
        return KERN_FAILURE;
#else
	vm_offset_t addr;
	vm_size_t size = 0;
	hash_info_bucket_t *info;
	unsigned int potential, actual;
	kern_return_t kr;

	if (host == HOST_NULL)
		return KERN_INVALID_HOST;

	/* start with in-line data */

	info = *infop;
	potential = *countp;

	for (;;) {
		actual = vm_page_info(info, potential);
		if (actual <= potential)
			break;

		/* allocate more memory */

		if (info != *infop)
			kmem_free(ipc_kernel_map, addr, size);

		size = vm_map_round_page(actual * sizeof *info,
					 VM_MAP_PAGE_MASK(ipc_kernel_map));
		kr = kmem_alloc_pageable(ipc_kernel_map, &addr, size);
		if (kr != KERN_SUCCESS)
			return KERN_RESOURCE_SHORTAGE;

		info = (hash_info_bucket_t *) addr;
		potential = (unsigned int) (size/sizeof (*info));
	}

	if (info == *infop) {
		/* data fit in-line; nothing to deallocate */

		*countp = actual;
	} else if (actual == 0) {
		kmem_free(ipc_kernel_map, addr, size);

		*countp = 0;
	} else {
		vm_map_copy_t copy;
		vm_size_t used;

		used = vm_map_round_page(actual * sizeof *info,
					 VM_MAP_PAGE_MASK(ipc_kernel_map));

		if (used != size)
			kmem_free(ipc_kernel_map, addr + used, size - used);

		kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr,
				   (vm_map_size_t)used, TRUE, &copy);
		assert(kr == KERN_SUCCESS);

		*infop = (hash_info_bucket_t *) copy;
		*countp = actual;
	}

	return KERN_SUCCESS;
#endif /* MACH_VM_DEBUG */
}
Example #11
File: locks.c Project: JackieXie168/xnu
kern_return_t
host_lockgroup_info(
	host_t					host,
	lockgroup_info_array_t	*lockgroup_infop,
	mach_msg_type_number_t	*lockgroup_infoCntp)
{
	lockgroup_info_t	*lockgroup_info_base;
	lockgroup_info_t	*lockgroup_info;
	vm_offset_t			lockgroup_info_addr;
	vm_size_t			lockgroup_info_size;
	vm_size_t			lockgroup_info_vmsize;
	lck_grp_t			*lck_grp;
	unsigned int		i;
	vm_map_copy_t		copy;
	kern_return_t		kr;

	if (host == HOST_NULL)
		return KERN_INVALID_HOST;

	lck_mtx_lock(&lck_grp_lock);

	lockgroup_info_size = lck_grp_cnt * sizeof(*lockgroup_info);
	lockgroup_info_vmsize = round_page(lockgroup_info_size);
	kr = kmem_alloc_pageable(ipc_kernel_map,
						 &lockgroup_info_addr, lockgroup_info_vmsize, VM_KERN_MEMORY_IPC);
	if (kr != KERN_SUCCESS) {
		lck_mtx_unlock(&lck_grp_lock);
		return(kr);
	}

	lockgroup_info_base = (lockgroup_info_t *) lockgroup_info_addr;
	lck_grp = (lck_grp_t *)queue_first(&lck_grp_queue);
	lockgroup_info = lockgroup_info_base;

	for (i = 0; i < lck_grp_cnt; i++) {

		lockgroup_info->lock_spin_cnt = lck_grp->lck_grp_spincnt;
		lockgroup_info->lock_spin_util_cnt = lck_grp->lck_grp_stat.lck_grp_spin_stat.lck_grp_spin_util_cnt;
		lockgroup_info->lock_spin_held_cnt = lck_grp->lck_grp_stat.lck_grp_spin_stat.lck_grp_spin_held_cnt;
		lockgroup_info->lock_spin_miss_cnt = lck_grp->lck_grp_stat.lck_grp_spin_stat.lck_grp_spin_miss_cnt;
		lockgroup_info->lock_spin_held_max = lck_grp->lck_grp_stat.lck_grp_spin_stat.lck_grp_spin_held_max;
		lockgroup_info->lock_spin_held_cum = lck_grp->lck_grp_stat.lck_grp_spin_stat.lck_grp_spin_held_cum;

		lockgroup_info->lock_mtx_cnt = lck_grp->lck_grp_mtxcnt;
		lockgroup_info->lock_mtx_util_cnt = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_util_cnt;
		lockgroup_info->lock_mtx_held_cnt = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_held_cnt;
		lockgroup_info->lock_mtx_miss_cnt = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_miss_cnt;
		lockgroup_info->lock_mtx_wait_cnt = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_wait_cnt;
		lockgroup_info->lock_mtx_held_max = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_held_max;
		lockgroup_info->lock_mtx_held_cum = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_held_cum;
		lockgroup_info->lock_mtx_wait_max = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_wait_max;
		lockgroup_info->lock_mtx_wait_cum = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_wait_cum;

		lockgroup_info->lock_rw_cnt = lck_grp->lck_grp_rwcnt;
		lockgroup_info->lock_rw_util_cnt = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_util_cnt;
		lockgroup_info->lock_rw_held_cnt = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_held_cnt;
		lockgroup_info->lock_rw_miss_cnt = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_miss_cnt;
		lockgroup_info->lock_rw_wait_cnt = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_wait_cnt;
		lockgroup_info->lock_rw_held_max = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_held_max;
		lockgroup_info->lock_rw_held_cum = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_held_cum;
		lockgroup_info->lock_rw_wait_max = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_wait_max;
		lockgroup_info->lock_rw_wait_cum = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_wait_cum;

		(void) strncpy(lockgroup_info->lockgroup_name,lck_grp->lck_grp_name, LOCKGROUP_MAX_NAME);

		lck_grp = (lck_grp_t *)(queue_next((queue_entry_t)(lck_grp)));
		lockgroup_info++;
	}

	*lockgroup_infoCntp = lck_grp_cnt;
	lck_mtx_unlock(&lck_grp_lock);

	if (lockgroup_info_size != lockgroup_info_vmsize)
		bzero((char *)lockgroup_info, lockgroup_info_vmsize - lockgroup_info_size);

	kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)lockgroup_info_addr,
			   (vm_map_size_t)lockgroup_info_size, TRUE, &copy);
	assert(kr == KERN_SUCCESS);

	*lockgroup_infop = (lockgroup_info_t *) copy;

	return(KERN_SUCCESS);
}
Example #12
void
KLDBootstrap::readPrelinkedExtensions(
    kernel_section_t * prelinkInfoSect)
{
    OSArray                   * infoDictArray           = NULL;  // do not release
    OSArray                   * personalitiesArray      = NULL;  // do not release
    OSObject                  * parsedXML       = NULL;  // must release
    OSDictionary              * prelinkInfoDict         = NULL;  // do not release
    OSString                  * errorString             = NULL;  // must release
    OSKext                    * theKernel               = NULL;  // must release

#if CONFIG_KXLD
    kernel_section_t          * kernelLinkStateSection  = NULL;  // see code
#endif
    kernel_segment_command_t  * prelinkLinkStateSegment = NULL;  // see code
    kernel_segment_command_t  * prelinkTextSegment      = NULL;  // see code
    kernel_segment_command_t  * prelinkInfoSegment      = NULL;  // see code

   /* We make some copies of data, but if anything fails we're basically
    * going to fail the boot, so these won't be cleaned up on error.
    */
    void                      * prelinkData             = NULL;  // see code
    void                      * prelinkCopy             = NULL;  // see code
    vm_size_t                   prelinkLength           = 0;
#if !__LP64__ && !defined(__arm__)
    vm_map_offset_t             prelinkDataMapOffset    = 0;
#endif

    kern_return_t               mem_result              = KERN_SUCCESS;

    OSDictionary              * infoDict                = NULL;  // do not release

    IORegistryEntry           * registryRoot            = NULL;  // do not release
    OSNumber                  * prelinkCountObj         = NULL;  // must release

    u_int                       i = 0;

    OSKextLog(/* kext */ NULL,
        kOSKextLogProgressLevel |
        kOSKextLogDirectoryScanFlag | kOSKextLogArchiveFlag,
        "Starting from prelinked kernel.");

   /*****
    * Wrap the kernel link state in-place in an OSData.
    * This is unnecessary (and the link state may not be present) if the kernel
    * does not have kxld support because this information is only used for
    * runtime linking.
    */
#if CONFIG_KXLD
    kernelLinkStateSection = getsectbyname(kPrelinkLinkStateSegment,
        kPrelinkKernelLinkStateSection);
    if (!kernelLinkStateSection) {
        OSKextLog(/* kext */ NULL,
            kOSKextLogErrorLevel |
            kOSKextLogArchiveFlag,
            "Can't find prelinked kernel link state.");
        goto finish;
    }

    theKernel = OSKext::lookupKextWithIdentifier(kOSKextKernelIdentifier);
    if (!theKernel) {
        OSKextLog(/* kext */ NULL,
            kOSKextLogErrorLevel |
            kOSKextLogArchiveFlag,
            "Can't find kernel kext object in prelinked kernel.");
        goto finish;
    }

    prelinkData = (void *) kernelLinkStateSection->addr;
    prelinkLength = kernelLinkStateSection->size;

    mem_result = kmem_alloc_pageable(kernel_map,
        (vm_offset_t *) &prelinkCopy, prelinkLength);
    if (mem_result != KERN_SUCCESS) {
        OSKextLog(/* kext */ NULL,
            kOSKextLogErrorLevel |
            kOSKextLogGeneralFlag | kOSKextLogArchiveFlag,
            "Can't copy prelinked kernel link state.");
        goto finish;
    }
    memcpy(prelinkCopy, prelinkData, prelinkLength);

    theKernel->linkState = OSData::withBytesNoCopy(prelinkCopy, prelinkLength);
    if (!theKernel->linkState) {
        OSKextLog(/* kext */ NULL,
            kOSKextLogErrorLevel |
            kOSKextLogGeneralFlag | kOSKextLogArchiveFlag,
            "Can't create prelinked kernel link state wrapper.");
        goto finish;
    }
    theKernel->linkState->setDeallocFunction(osdata_kmem_free);
#endif

    prelinkTextSegment = getsegbyname(kPrelinkTextSegment);
    if (!prelinkTextSegment) {
        OSKextLog(/* kext */ NULL,
            kOSKextLogErrorLevel |
            kOSKextLogDirectoryScanFlag | kOSKextLogArchiveFlag,
            "Can't find prelinked kexts' text segment.");
        goto finish;
    }

    prelinkData = (void *) prelinkTextSegment->vmaddr;
    prelinkLength = prelinkTextSegment->vmsize;

#if !__LP64__
    /* To enable paging and write/execute protections on the kext
     * executables, we need to copy them out of the booter-created
     * memory, reallocate that space with VM, then copy them back in.
     * This isn't necessary on LP64 because kexts have their own VM
     * region on that architecture model.
     */

    mem_result = kmem_alloc(kernel_map, (vm_offset_t *)&prelinkCopy,
        prelinkLength);
    if (mem_result != KERN_SUCCESS) {
        OSKextLog(/* kext */ NULL,
            kOSKextLogErrorLevel |
            kOSKextLogGeneralFlag | kOSKextLogArchiveFlag,
            "Can't copy prelinked kexts' text for VM reassign.");
        goto finish;
    }

   /* Copy it out.
    */
    memcpy(prelinkCopy, prelinkData, prelinkLength);
    
   /* Dump the booter memory.
    */
    ml_static_mfree((vm_offset_t)prelinkData, prelinkLength);

   /* Set up the VM region.
    */
    prelinkDataMapOffset = (vm_map_offset_t)(uintptr_t)prelinkData;
    mem_result = vm_map_enter_mem_object(
        kernel_map,
        &prelinkDataMapOffset,
        prelinkLength, /* mask */ 0, 
        VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, 
        (ipc_port_t)NULL,
        (vm_object_offset_t) 0,
        /* copy */ FALSE,
        /* cur_protection */ VM_PROT_ALL,
        /* max_protection */ VM_PROT_ALL,
        /* inheritance */ VM_INHERIT_DEFAULT);
    if ((mem_result != KERN_SUCCESS) || 
        (prelinkTextSegment->vmaddr != prelinkDataMapOffset)) 
    {
        OSKextLog(/* kext */ NULL,
            kOSKextLogErrorLevel |
            kOSKextLogGeneralFlag | kOSKextLogArchiveFlag,
            "Can't create kexts' text VM entry at 0x%llx, length 0x%x (error 0x%x).",
            (unsigned long long) prelinkDataMapOffset, prelinkLength, mem_result);
        goto finish;
    }
    prelinkData = (void *)(uintptr_t)prelinkDataMapOffset;

   /* And copy it back.
    */
    memcpy(prelinkData, prelinkCopy, prelinkLength);

    kmem_free(kernel_map, (vm_offset_t)prelinkCopy, prelinkLength);
#endif /* !__LP64__ */

   /* Unserialize the info dictionary from the prelink info section.
    */
    parsedXML = OSUnserializeXML((const char *)prelinkInfoSect->addr,
        &errorString);
    if (parsedXML) {
        prelinkInfoDict = OSDynamicCast(OSDictionary, parsedXML);
    }
    if (!prelinkInfoDict) {
        const char * errorCString = "(unknown error)";
        
        if (errorString && errorString->getCStringNoCopy()) {
            errorCString = errorString->getCStringNoCopy();
        } else if (parsedXML) {
            errorCString = "not a dictionary";
        }
        OSKextLog(/* kext */ NULL, kOSKextLogErrorLevel | kOSKextLogArchiveFlag,
            "Error unserializing prelink plist: %s.", errorCString);
        goto finish;
    }

    infoDictArray = OSDynamicCast(OSArray, 
        prelinkInfoDict->getObject(kPrelinkInfoDictionaryKey));
    if (!infoDictArray) {
        OSKextLog(/* kext */ NULL, kOSKextLogErrorLevel | kOSKextLogArchiveFlag,
            "The prelinked kernel has no kext info dictionaries");
        goto finish;
    }

   /* Create OSKext objects for each info dictionary.
    */
    for (i = 0; i < infoDictArray->getCount(); ++i) {
        infoDict = OSDynamicCast(OSDictionary, infoDictArray->getObject(i));
        if (!infoDict) {
            OSKextLog(/* kext */ NULL,
                kOSKextLogErrorLevel |
                kOSKextLogDirectoryScanFlag | kOSKextLogArchiveFlag,
                "Can't find info dictionary for prelinked kext #%d.", i);
            continue;
        }

       /* Create the kext for the entry, then release it, because the
        * kext system keeps them around until explicitly removed.
        * Any creation/registration failures are already logged for us.
        */
        OSKext * newKext = OSKext::withPrelinkedInfoDict(infoDict);
        OSSafeReleaseNULL(newKext);
    }
    
    /* Get all of the personalities for kexts that were not prelinked and
     * add them to the catalogue.
     */
    personalitiesArray = OSDynamicCast(OSArray,
        prelinkInfoDict->getObject(kPrelinkPersonalitiesKey));
    if (!personalitiesArray) {
        OSKextLog(/* kext */ NULL, kOSKextLogErrorLevel | kOSKextLogArchiveFlag,
            "The prelinked kernel has no personalities array");
        goto finish;
    }

    if (personalitiesArray->getCount()) {
        OSKext::setPrelinkedPersonalities(personalitiesArray);
    }

   /* Store the number of prelinked kexts in the registry so we can tell
    * when the system has been started from a prelinked kernel.
    */
    registryRoot = IORegistryEntry::getRegistryRoot();
    assert(registryRoot);

    prelinkCountObj = OSNumber::withNumber(
        (unsigned long long)infoDictArray->getCount(),
        8 * sizeof(uint32_t));
    assert(prelinkCountObj);
    if (prelinkCountObj) {
        registryRoot->setProperty(kOSPrelinkKextCountKey, prelinkCountObj);
    }

    OSSafeReleaseNULL(prelinkCountObj);
    prelinkCountObj = OSNumber::withNumber(
        (unsigned long long)personalitiesArray->getCount(),
        8 * sizeof(uint32_t));
    assert(prelinkCountObj);
    if (prelinkCountObj) {
        registryRoot->setProperty(kOSPrelinkPersonalityCountKey, prelinkCountObj);
    }

    OSKextLog(/* kext */ NULL,
        kOSKextLogProgressLevel |
        kOSKextLogGeneralFlag | kOSKextLogKextBookkeepingFlag |
        kOSKextLogDirectoryScanFlag | kOSKextLogArchiveFlag,
        "%u prelinked kexts, and %u additional personalities.", 
        infoDictArray->getCount(), personalitiesArray->getCount());

#if __LP64__
        /* On LP64 systems, kexts are copied to their own special VM region
         * during OSKext init time, so we can free the whole segment now.
         */
        ml_static_mfree((vm_offset_t) prelinkData, prelinkLength);
#endif /* __LP64__ */

   /* Free the link state segment, kexts have copied out what they need.
    */
    prelinkLinkStateSegment = getsegbyname(kPrelinkLinkStateSegment);
    if (prelinkLinkStateSegment) {
        ml_static_mfree((vm_offset_t)prelinkLinkStateSegment->vmaddr,
            (vm_size_t)prelinkLinkStateSegment->vmsize);
    }

   /* Free the prelink info segment, we're done with it.
    */
    prelinkInfoSegment = getsegbyname(kPrelinkInfoSegment);
    if (prelinkInfoSegment) {
        ml_static_mfree((vm_offset_t)prelinkInfoSegment->vmaddr,
            (vm_size_t)prelinkInfoSegment->vmsize);
    }

finish:
    OSSafeRelease(errorString);
    OSSafeRelease(parsedXML);
    OSSafeRelease(theKernel);
    OSSafeRelease(prelinkCountObj);
    return;
}
Example #13
File: sys_process.c Project: MarginC/kame
int
proc_rwmem(struct proc *p, struct uio *uio)
{
	struct vmspace *vm;
	vm_map_t map;
	vm_object_t object = NULL;
	vm_offset_t pageno = 0;		/* page number */
	vm_prot_t reqprot;
	vm_offset_t kva;
	int error, writing;

	GIANT_REQUIRED;

	/*
	 * if the vmspace is in the midst of being deallocated or the
	 * process is exiting, don't try to grab anything.  The page table
	 * usage in that process can be messed up.
	 */
	vm = p->p_vmspace;
	if ((p->p_flag & P_WEXIT))
		return (EFAULT);
	if (vm->vm_refcnt < 1)
		return (EFAULT);
	++vm->vm_refcnt;
	/*
	 * The map we want...
	 */
	map = &vm->vm_map;

	writing = uio->uio_rw == UIO_WRITE;
	reqprot = writing ? (VM_PROT_WRITE | VM_PROT_OVERRIDE_WRITE) :
	    VM_PROT_READ;

	kva = kmem_alloc_pageable(kernel_map, PAGE_SIZE);

	/*
	 * Only map in one page at a time.  We don't have to, but it
	 * makes things easier.  This way is trivial - right?
	 */
	do {
		vm_map_t tmap;
		vm_offset_t uva;
		int page_offset;		/* offset into page */
		vm_map_entry_t out_entry;
		vm_prot_t out_prot;
		boolean_t wired;
		vm_pindex_t pindex;
		u_int len;
		vm_page_t m;

		object = NULL;

		uva = (vm_offset_t)uio->uio_offset;

		/*
		 * Get the page number of this segment.
		 */
		pageno = trunc_page(uva);
		page_offset = uva - pageno;

		/*
		 * How many bytes to copy
		 */
		len = min(PAGE_SIZE - page_offset, uio->uio_resid);

		/*
		 * Fault the page on behalf of the process
		 */
		error = vm_fault(map, pageno, reqprot, VM_FAULT_NORMAL);
		if (error) {
			error = EFAULT;
			break;
		}

		/*
		 * Now we need to get the page.  out_entry, out_prot, wired,
		 * and single_use aren't used.  One would think the vm code
		 * would be a *bit* nicer...  We use tmap because
		 * vm_map_lookup() can change the map argument.
		 */
		tmap = map;
		error = vm_map_lookup(&tmap, pageno, reqprot, &out_entry,
		    &object, &pindex, &out_prot, &wired);

		if (error) {
			error = EFAULT;

			/*
			 * Make sure that there is no residue in 'object' from
			 * an error return on vm_map_lookup.
			 */
			object = NULL;

			break;
		}

		m = vm_page_lookup(object, pindex);

		/* Allow fallback to backing objects if we are reading */

		while (m == NULL && !writing && object->backing_object) {

			pindex += OFF_TO_IDX(object->backing_object_offset);
			object = object->backing_object;
			
			m = vm_page_lookup(object, pindex);
		}

		if (m == NULL) {
			error = EFAULT;

			/*
			 * Make sure that there is no residue in 'object' from
			 * an error return on vm_map_lookup.
			 */
			object = NULL;

			vm_map_lookup_done(tmap, out_entry);

			break;
		}

		/*
		 * Wire the page into memory
		 */
		vm_page_lock_queues();
		vm_page_wire(m);
		vm_page_unlock_queues();

		/*
		 * We're done with tmap now.
		 * But reference the object first, so that we won't lose
		 * it.
		 */
		vm_object_reference(object);
		vm_map_lookup_done(tmap, out_entry);

		pmap_qenter(kva, &m, 1);

		/*
		 * Now do the i/o move.
		 */
		error = uiomove((caddr_t)(kva + page_offset), len, uio);

		pmap_qremove(kva, 1);

		/*
		 * release the page and the object
		 */
		vm_page_lock_queues();
		vm_page_unwire(m, 1);
		vm_page_unlock_queues();
		vm_object_deallocate(object);

		object = NULL;

	} while (error == 0 && uio->uio_resid > 0);

	if (object)
		vm_object_deallocate(object);

	kmem_free(kernel_map, kva, PAGE_SIZE);
	vmspace_free(vm);
	return (error);
}
Example #14
File: fbt.c Project: 0xffea/xnu
void
fbt_init( void )
{

	PE_parse_boot_argn("DisableFBT", &gDisableFBT, sizeof (gDisableFBT));

	if (0 == gDisableFBT)
	{
		int majdevno = cdevsw_add(FBT_MAJOR, &fbt_cdevsw);
		unsigned long size = 0, header_size, round_size;
	   	kern_return_t ret;
		void *p, *q;
		
		if (majdevno < 0) {
			printf("fbt_init: failed to allocate a major number!\n");
			return;
		}

		/*
		 * Capture the kernel's mach_header in its entirety and the contents of
		 * its LINKEDIT segment (and only that segment). This is sufficient to
		 * build all the fbt probes lazily the first time a client looks to
		 * the fbt provider. Remember these on the global struct modctl g_fbt_kernctl.
		 */
		header_size = sizeof(kernel_mach_header_t) + _mh_execute_header.sizeofcmds;
		p = getsegdatafromheader(&_mh_execute_header, SEG_LINKEDIT, &size);

        round_size = round_page(header_size + size);
		/* "q" will accomodate copied kernel_mach_header_t, its load commands, and LINKEIT segment. */
		ret = kmem_alloc_pageable(kernel_map, (vm_offset_t *)&q, round_size);

		if (p && (ret == KERN_SUCCESS)) {
			kernel_segment_command_t *sgp;

			bcopy( (void *)&_mh_execute_header, q, header_size);
			bcopy( p, (char *)q + header_size, size);

			sgp = getsegbynamefromheader(q, SEG_LINKEDIT);

			if (sgp) {
				sgp->vmaddr = (uintptr_t)((char *)q + header_size);
				g_fbt_kernctl.address = (vm_address_t)q;
				g_fbt_kernctl.size = header_size + size;
			} else {
				kmem_free(kernel_map, (vm_offset_t)q, round_size);
				g_fbt_kernctl.address = (vm_address_t)NULL;
				g_fbt_kernctl.size = 0;
			}
		} else {
			if (ret == KERN_SUCCESS)
				kmem_free(kernel_map, (vm_offset_t)q, round_size);
			g_fbt_kernctl.address = (vm_address_t)NULL;
			g_fbt_kernctl.size = 0;
		}

		strncpy((char *)&(g_fbt_kernctl.mod_modname), "mach_kernel", KMOD_MAX_NAME);
		((char *)&(g_fbt_kernctl.mod_modname))[KMOD_MAX_NAME -1] = '\0';

		fbt_attach( (dev_info_t	*)(uintptr_t)majdevno, DDI_ATTACH );

		gDisableFBT = 1; /* Ensure this initialization occurs just one time. */
	}
	else
		printf("fbt_init: DisableFBT non-zero, no FBT probes will be provided.\n");
}