Example #1
/*
 * XXX Internally, we use VM_PROT_* somewhat interchangeably, but the correct
 * XXX usage is PROT_* from an interface perspective.  Thus the values of
 * XXX VM_PROT_* and PROT_* need to correspond.
 */
int
mmap(proc_t p, struct mmap_args *uap, user_addr_t *retval)
{
	/*
	 *	Map in special device (must be SHARED) or file
	 */
	struct fileproc *fp;
	register struct		vnode *vp;
	int			flags;
	int			prot, file_prot;
	int			err = 0;
	vm_map_t		user_map;
	kern_return_t		result;
	mach_vm_offset_t	user_addr;
	mach_vm_size_t		user_size;
	vm_object_offset_t	pageoff;
	vm_object_offset_t	file_pos;
	int			alloc_flags = 0;
	boolean_t		docow;
	vm_prot_t		maxprot;
	void 			*handle;
	vm_pager_t		pager;
	int 			mapanon = 0;
	int 			fpref = 0;
	int			error = 0;
	int fd = uap->fd;

	user_addr = (mach_vm_offset_t)uap->addr;
	user_size = (mach_vm_size_t) uap->len;

	AUDIT_ARG(addr, user_addr);
	AUDIT_ARG(len, user_size);
	AUDIT_ARG(fd, uap->fd);

	prot = (uap->prot & VM_PROT_ALL);
#if 3777787
	/*
	 * Since the hardware currently does not support writing without
	 * read-before-write, or execution-without-read, if the request is
	 * for write or execute access, we must imply read access as well;
	 * otherwise programs expecting this to work will fail to operate.
	 */
	if (prot & (VM_PROT_EXECUTE | VM_PROT_WRITE))
		prot |= VM_PROT_READ;
#endif	/* radar 3777787 */
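	/*
	 * For example (illustrative): a request for PROT_WRITE alone is
	 * widened here to PROT_READ | PROT_WRITE before the map entry is
	 * created, and the same widening is applied to maxprot below.
	 */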

	flags = uap->flags;
	vp = NULLVP;

	/*
	 * The vm code does not have prototypes & the compiler doesn't do
	 * the right thing when you cast a 64-bit value and pass it in a
	 * function call.  So here it is.
	 */
	file_pos = (vm_object_offset_t)uap->pos;


	/* make sure the mapping fits into the numeric range, etc. */
	if (file_pos + user_size > (vm_object_offset_t)-PAGE_SIZE_64)
		return (EINVAL);

	/*
	 * Align the file position to a page boundary,
	 * and save its page offset component.
	 */
	pageoff = (file_pos & PAGE_MASK);
	file_pos -= (vm_object_offset_t)pageoff;


	/* Adjust size for rounding (on both ends). */
	user_size += pageoff;			/* low end... */
	user_size = mach_vm_round_page(user_size);	/* hi end */
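	/*
	 * Worked example (illustrative, assuming 4 KiB pages): for
	 * uap->pos == 0x12345 and uap->len == 0x100, pageoff is 0x345,
	 * file_pos becomes 0x12000, and user_size grows to 0x445 and then
	 * rounds up to 0x1000, so the mapping covers every page that the
	 * requested byte range touches.
	 */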


	/*
	 * Check for illegal addresses.  Watch out for address wrap... Note
	 * that VM_*_ADDRESS are not constants due to casts (argh).
	 */
	if (flags & MAP_FIXED) {
		/*
		 * The specified address must have the same remainder
		 * as the file offset taken modulo PAGE_SIZE, so it
		 * should be aligned after adjustment by pageoff.
		 */
		user_addr -= pageoff;
		if (user_addr & PAGE_MASK)
			return (EINVAL);
	}
#ifdef notyet
	/* Do not have APIs to get this info; need to wait till then. */
	/*
	 * XXX for non-fixed mappings where no hint is provided or
	 * the hint would fall in the potential heap space,
	 * place it after the end of the largest possible heap.
	 *
	 * There should really be a pmap call to determine a reasonable
	 * location.
	 */
	else if (addr < mach_vm_round_page(p->p_vmspace->vm_daddr + MAXDSIZ))
		addr = mach_vm_round_page(p->p_vmspace->vm_daddr + MAXDSIZ);

#endif

	alloc_flags = 0;

	if (flags & MAP_ANON) {
		/*
		 * Mapping blank space is trivial.  Use positive fds as the alias
		 * value for memory tracking. 
		 */
		if (fd != -1) {
			/*
			 * Use "fd" to pass (some) Mach VM allocation flags,
			 * (see the VM_FLAGS_* definitions).
			 */
			alloc_flags = fd & (VM_FLAGS_ALIAS_MASK |
					    VM_FLAGS_PURGABLE);
			if (alloc_flags != fd) {
				/* reject if there are any extra flags */
				return EINVAL;
			}
		}
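		/*
		 * Illustrative userland usage (a sketch, not from this file;
		 * VM_MAKE_TAG() is the <mach/vm_statistics.h> helper that
		 * shifts a tag into the alias bits):
		 *
		 *	mmap(NULL, size, PROT_READ | PROT_WRITE,
		 *	    MAP_ANON | MAP_PRIVATE,
		 *	    VM_FLAGS_PURGABLE |
		 *	    VM_MAKE_TAG(VM_MEMORY_APPLICATION_SPECIFIC_1),
		 *	    0);
		 */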
			
		handle = NULL;
		maxprot = VM_PROT_ALL;
		file_pos = 0;
		mapanon = 1;
	} else {
		struct vnode_attr va;
		vfs_context_t ctx = vfs_context_current();

		/*
		 * Mapping file, get fp for validation. Obtain vnode and make
		 * sure it is of appropriate type.
		 */
		err = fp_lookup(p, fd, &fp, 0);
		if (err)
			return(err);
		fpref = 1;
		if (fp->f_fglob->fg_type == DTYPE_PSXSHM) {
			uap->addr = (user_addr_t)user_addr;
			uap->len = (user_size_t)user_size;
			uap->prot = prot;
			uap->flags = flags;
			uap->pos = file_pos;
			error = pshm_mmap(p, uap, retval, fp, (off_t)pageoff);
			goto bad;
		}

		if (fp->f_fglob->fg_type != DTYPE_VNODE) {
			error = EINVAL;
			goto bad;
		}
		vp = (struct vnode *)fp->f_fglob->fg_data;
		error = vnode_getwithref(vp);
		if (error != 0)
			goto bad;

		if (vp->v_type != VREG && vp->v_type != VCHR) {
			(void)vnode_put(vp);
			error = EINVAL;
			goto bad;
		}

		AUDIT_ARG(vnpath, vp, ARG_VNODE1);
		
		/*
		 * POSIX: mmap needs to update access time for mapped files
		 */
		if ((vnode_vfsvisflags(vp) & MNT_NOATIME) == 0) {
			VATTR_INIT(&va);
			nanotime(&va.va_access_time);
			VATTR_SET_ACTIVE(&va, va_access_time);
			vnode_setattr(vp, &va, ctx);
		}
		
		/*
		 * XXX hack to handle use of /dev/zero to map anon memory (a la
		 * SunOS).
		 */
		if (vp->v_type == VCHR || vp->v_type == VSTR) {
			(void)vnode_put(vp);
			error = ENODEV;
			goto bad;
		} else {
			/*
			 * Ensure that file and memory protections are
			 * compatible.  Note that we only worry about
			 * writability if mapping is shared; in this case,
			 * current and max prot are dictated by the open file.
			 * XXX use the vnode instead?  Problem is: what
			 * credentials do we use for determination? What if
			 * proc does a setuid?
			 */
			maxprot = VM_PROT_EXECUTE;	/* ??? */
			if (fp->f_fglob->fg_flag & FREAD)
				maxprot |= VM_PROT_READ;
			else if (prot & PROT_READ) {
				(void)vnode_put(vp);
				error = EACCES;
				goto bad;
			}
			/*
			 * If we are sharing potential changes (either via
			 * MAP_SHARED or via the implicit sharing of character
			 * device mappings), and we are trying to get write
			 * permission although we opened it without asking
			 * for it, bail out. 
			 */

			if ((flags & MAP_SHARED) != 0) {
				if ((fp->f_fglob->fg_flag & FWRITE) != 0) {
					/*
					 * Check for write access.
					 *
					 * Note that we already made this check when
					 * granting FWRITE against the file, so it seems
					 * redundant here.
					 */
					error = vnode_authorize(vp, NULL, KAUTH_VNODE_CHECKIMMUTABLE, ctx);

					/* if not granted for any reason, but we wanted it, bad */
					if ((prot & PROT_WRITE) && (error != 0)) {
						vnode_put(vp);
						goto bad;
					}

					/* if writable, remember */
					if (error == 0)
						maxprot |= VM_PROT_WRITE;

				} else if ((prot & PROT_WRITE) != 0) {
					(void)vnode_put(vp);
					error = EACCES;
					goto bad;
				}
			} else
				maxprot |= VM_PROT_WRITE;
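			/*
			 * Net effect (summary added for clarity): a private
			 * mapping may always gain VM_PROT_WRITE in maxprot,
			 * since its writes go to copy-on-write pages rather
			 * than the file; a shared mapping gains it only if the
			 * file was opened for writing and is not immutable.
			 * PROT_READ without FREAD was already rejected above
			 * with EACCES.
			 */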

			handle = (void *)vp;
#if CONFIG_MACF
			error = mac_file_check_mmap(vfs_context_ucred(ctx),
			    fp->f_fglob, prot, flags, &maxprot);
			if (error) {
				(void)vnode_put(vp);
				goto bad;
			}
#endif /* CONFIG_MACF */
		}
	}

	if (user_size == 0) {
		if (!mapanon)
			(void)vnode_put(vp);
		error = 0;
		goto bad;
	}

	/*
	 *	We bend a little - round the start and end addresses
	 *	to the nearest page boundary.
	 */
	user_size = mach_vm_round_page(user_size);

	if (file_pos & PAGE_MASK_64) {
		if (!mapanon)
			(void)vnode_put(vp);
		error = EINVAL;
		goto bad;
	}

	user_map = current_map();

	if ((flags & MAP_FIXED) == 0) {
		alloc_flags |= VM_FLAGS_ANYWHERE;
		user_addr = mach_vm_round_page(user_addr);
	} else {
		if (user_addr != mach_vm_trunc_page(user_addr)) {
		        if (!mapanon)
			        (void)vnode_put(vp);
			error = EINVAL;
			goto bad;
		}
		/*
		 * mmap(MAP_FIXED) will replace any existing mappings in the
		 * specified range, if the new mapping is successful.
		 * If we just deallocate the specified address range here,
		 * another thread might jump in and allocate memory in that
		 * range before we get a chance to establish the new mapping,
		 * and we won't have a chance to restore the old mappings.
		 * So we use VM_FLAGS_OVERWRITE to let Mach VM know that it
		 * has to deallocate the existing mappings and establish the
		 * new ones atomically.
		 */
		alloc_flags |= VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE;
	}

	if (flags & MAP_NOCACHE)
		alloc_flags |= VM_FLAGS_NO_CACHE;

	/*
	 * Lookup/allocate object.
	 */
	if (handle == NULL) {
		pager = NULL;
#ifdef notyet
/* Hmm .. */
#if defined(VM_PROT_READ_IS_EXEC)
		if (prot & VM_PROT_READ)
			prot |= VM_PROT_EXECUTE;
		if (maxprot & VM_PROT_READ)
			maxprot |= VM_PROT_EXECUTE;
#endif
#endif

#if 3777787
		if (prot & (VM_PROT_EXECUTE | VM_PROT_WRITE))
			prot |= VM_PROT_READ;
		if (maxprot & (VM_PROT_EXECUTE | VM_PROT_WRITE))
			maxprot |= VM_PROT_READ;
#endif	/* radar 3777787 */

		result = vm_map_enter_mem_object(user_map,
						 &user_addr, user_size,
						 0, alloc_flags,
						 IPC_PORT_NULL, 0, FALSE,
						 prot, maxprot,
						 (flags & MAP_SHARED) ?
						 VM_INHERIT_SHARE : 
						 VM_INHERIT_DEFAULT);
		if (result != KERN_SUCCESS)
			goto out;
	} else {
		pager = (vm_pager_t)ubc_getpager(vp);
		
		if (pager == NULL) {
			(void)vnode_put(vp);
			error = ENOMEM;
			goto bad;
		}

		/*
		 *  Set credentials:
		 *	FIXME: if we're writing the file we need a way to
		 *      ensure that someone doesn't replace our R/W creds
		 * 	with ones that only work for read.
		 */

		ubc_setthreadcred(vp, p, current_thread());
		docow = FALSE;
		if ((flags & (MAP_ANON|MAP_SHARED)) == 0) {
			docow = TRUE;
		}

#ifdef notyet
/* Hmm .. */
#if defined(VM_PROT_READ_IS_EXEC)
		if (prot & VM_PROT_READ)
			prot |= VM_PROT_EXECUTE;
		if (maxprot & VM_PROT_READ)
			maxprot |= VM_PROT_EXECUTE;
#endif
#endif /* notyet */

#if 3777787
		if (prot & (VM_PROT_EXECUTE | VM_PROT_WRITE))
			prot |= VM_PROT_READ;
		if (maxprot & (VM_PROT_EXECUTE | VM_PROT_WRITE))
			maxprot |= VM_PROT_READ;
#endif	/* radar 3777787 */

		result = vm_map_enter_mem_object(user_map,
						 &user_addr, user_size,
						 0, alloc_flags,
						 (ipc_port_t)pager, file_pos,
						 docow, prot, maxprot, 
						 (flags & MAP_SHARED) ?
						 VM_INHERIT_SHARE : 
						 VM_INHERIT_DEFAULT);

		if (result != KERN_SUCCESS) {
			(void)vnode_put(vp);
			goto out;
		}

		file_prot = prot & (PROT_READ | PROT_WRITE | PROT_EXEC);
		if (docow) {
			/* private mapping: won't write to the file */
			file_prot &= ~PROT_WRITE;
		}
		(void) ubc_map(vp, file_prot);
	}

	if (!mapanon)
		(void)vnode_put(vp);

out:
	switch (result) {
	case KERN_SUCCESS:
		*retval = user_addr + pageoff;
		error = 0;
		break;
	case KERN_INVALID_ADDRESS:
	case KERN_NO_SPACE:
		error = ENOMEM;
		break;
	case KERN_PROTECTION_FAILURE:
		error = EACCES;
		break;
	default:
		error = EINVAL;
		break;
	}
bad:
	if (fpref)
		fp_drop(p, fd, fp, 0);

	KERNEL_DEBUG_CONSTANT((BSDDBG_CODE(DBG_BSD_SC_EXTENDED_INFO, SYS_mmap) | DBG_FUNC_NONE), fd, (uint32_t)(*retval), (uint32_t)user_size, error, 0);
	KERNEL_DEBUG_CONSTANT((BSDDBG_CODE(DBG_BSD_SC_EXTENDED_INFO2, SYS_mmap) | DBG_FUNC_NONE), (uint32_t)(*retval >> 32), (uint32_t)(user_size >> 32),
			      (uint32_t)(file_pos >> 32), (uint32_t)file_pos, 0);

	return(error);
}
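The alignment rules above are observable from userland. Below is a minimal,
hedged sketch (plain C, POSIX; not part of the kernel source) that exercises
two of the checks implemented in this function: an anonymous mapping takes the
trivial MAP_ANON path, and a MAP_FIXED request at a non-page-aligned address
trips the user_addr & PAGE_MASK rejection and fails with EINVAL.

#include <errno.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int
main(void)
{
	size_t pagesz = (size_t)sysconf(_SC_PAGESIZE);

	/* Anonymous mapping: handled by the MAP_ANON branch above. */
	void *p = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_PRIVATE, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap(MAP_ANON)");
		return 1;
	}

	/* MAP_FIXED at an unaligned address: rejected with EINVAL by the
	 * page-alignment check before any mapping is attempted. */
	void *q = mmap((char *)p + 1, pagesz, PROT_READ,
	    MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0);
	if (q == MAP_FAILED && errno == EINVAL)
		printf("unaligned MAP_FIXED rejected as expected\n");

	munmap(p, pagesz);
	return 0;
}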
Example #2
void
KLDBootstrap::readPrelinkedExtensions(
    kernel_section_t * prelinkInfoSect)
{
    OSArray                   * infoDictArray           = NULL;  // do not release
    OSArray                   * personalitiesArray      = NULL;  // do not release
    OSObject                  * parsedXML               = NULL;  // must release
    OSDictionary              * prelinkInfoDict         = NULL;  // do not release
    OSString                  * errorString             = NULL;  // must release
    OSKext                    * theKernel               = NULL;  // must release

#if CONFIG_KXLD
    kernel_section_t          * kernelLinkStateSection  = NULL;  // see code
#endif
    kernel_segment_command_t  * prelinkLinkStateSegment = NULL;  // see code
    kernel_segment_command_t  * prelinkTextSegment      = NULL;  // see code
    kernel_segment_command_t  * prelinkInfoSegment      = NULL;  // see code

   /* We make some copies of data, but if anything fails we're basically
    * going to fail the boot, so these won't be cleaned up on error.
    */
    void                      * prelinkData             = NULL;  // see code
    void                      * prelinkCopy             = NULL;  // see code
    vm_size_t                   prelinkLength           = 0;
#if !__LP64__
    vm_map_offset_t             prelinkDataMapOffset    = 0;
#endif

    kern_return_t               mem_result              = KERN_SUCCESS;

    OSDictionary              * infoDict                = NULL;  // do not release

    IORegistryEntry           * registryRoot            = NULL;  // do not release
    OSNumber                  * prelinkCountObj         = NULL;  // must release

    u_int                       i = 0;

    OSKextLog(/* kext */ NULL,
        kOSKextLogProgressLevel |
        kOSKextLogDirectoryScanFlag | kOSKextLogArchiveFlag,
        "Starting from prelinked kernel.");

   /*****
    * Wrap the kernel link state in-place in an OSData.
    * This is unnecessary (and the link state may not be present) if the kernel
    * does not have kxld support because this information is only used for
    * runtime linking.
    */
#if CONFIG_KXLD
    kernelLinkStateSection = getsectbyname(kPrelinkLinkStateSegment,
        kPrelinkKernelLinkStateSection);
    if (!kernelLinkStateSection) {
        OSKextLog(/* kext */ NULL,
            kOSKextLogErrorLevel |
            kOSKextLogArchiveFlag,
            "Can't find prelinked kernel link state.");
        goto finish;
    }

    theKernel = OSKext::lookupKextWithIdentifier(kOSKextKernelIdentifier);
    if (!theKernel) {
        OSKextLog(/* kext */ NULL,
            kOSKextLogErrorLevel |
            kOSKextLogArchiveFlag,
            "Can't find kernel kext object in prelinked kernel.");
        goto finish;
    }

    prelinkData = (void *) kernelLinkStateSection->addr;
    prelinkLength = kernelLinkStateSection->size;

    mem_result = kmem_alloc_pageable(kernel_map,
        (vm_offset_t *) &prelinkCopy, prelinkLength);
    if (mem_result != KERN_SUCCESS) {
        OSKextLog(/* kext */ NULL,
            kOSKextLogErrorLevel |
            kOSKextLogGeneralFlag | kOSKextLogArchiveFlag,
            "Can't copy prelinked kernel link state.");
        goto finish;
    }
    memcpy(prelinkCopy, prelinkData, prelinkLength);

    theKernel->linkState = OSData::withBytesNoCopy(prelinkCopy, prelinkLength);
    if (!theKernel->linkState) {
        OSKextLog(/* kext */ NULL,
            kOSKextLogErrorLevel |
            kOSKextLogGeneralFlag | kOSKextLogArchiveFlag,
            "Can't create prelinked kernel link state wrapper.");
        goto finish;
    }
    theKernel->linkState->setDeallocFunction(osdata_kmem_free);
#endif

    prelinkTextSegment = getsegbyname(kPrelinkTextSegment);
    if (!prelinkTextSegment) {
        OSKextLog(/* kext */ NULL,
            kOSKextLogErrorLevel |
            kOSKextLogDirectoryScanFlag | kOSKextLogArchiveFlag,
            "Can't find prelinked kexts' text segment.");
        goto finish;
    }

    prelinkData = (void *) prelinkTextSegment->vmaddr;
    prelinkLength = prelinkTextSegment->vmsize;

#if !__LP64__
    /* To enable paging and write/execute protections on the kext
     * executables, we need to copy them out of the booter-created
     * memory, reallocate that space with VM, then copy them back in.
     * This isn't necessary on LP64 because kexts have their own VM
     * region on that architecture.
     */

    mem_result = kmem_alloc(kernel_map, (vm_offset_t *)&prelinkCopy,
        prelinkLength);
    if (mem_result != KERN_SUCCESS) {
        OSKextLog(/* kext */ NULL,
            kOSKextLogErrorLevel |
            kOSKextLogGeneralFlag | kOSKextLogArchiveFlag,
            "Can't copy prelinked kexts' text for VM reassign.");
        goto finish;
    }

   /* Copy it out.
    */
    memcpy(prelinkCopy, prelinkData, prelinkLength);
    
   /* Dump the booter memory.
    */
    ml_static_mfree((vm_offset_t)prelinkData, prelinkLength);

   /* Set up the VM region.
    */
    prelinkDataMapOffset = (vm_map_offset_t)(uintptr_t)prelinkData;
    mem_result = vm_map_enter_mem_object(
        kernel_map,
        &prelinkDataMapOffset,
        prelinkLength, /* mask */ 0, 
        VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, 
        (ipc_port_t)NULL,
        (vm_object_offset_t) 0,
        /* copy */ FALSE,
        /* cur_protection */ VM_PROT_ALL,
        /* max_protection */ VM_PROT_ALL,
        /* inheritance */ VM_INHERIT_DEFAULT);
    if ((mem_result != KERN_SUCCESS) || 
        (prelinkTextSegment->vmaddr != prelinkDataMapOffset)) 
    {
        OSKextLog(/* kext */ NULL,
            kOSKextLogErrorLevel |
            kOSKextLogGeneralFlag | kOSKextLogArchiveFlag,
            "Can't create kexts' text VM entry at 0x%llx, length 0x%x (error 0x%x).",
            (unsigned long long) prelinkDataMapOffset, prelinkLength, mem_result);
        goto finish;
    }
    prelinkData = (void *)(uintptr_t)prelinkDataMapOffset;

   /* And copy it back.
    */
    memcpy(prelinkData, prelinkCopy, prelinkLength);

    kmem_free(kernel_map, (vm_offset_t)prelinkCopy, prelinkLength);
#endif /* !__LP64__ */

   /* Unserialize the info dictionary from the prelink info section.
    */
    parsedXML = OSUnserializeXML((const char *)prelinkInfoSect->addr,
        &errorString);
    if (parsedXML) {
        prelinkInfoDict = OSDynamicCast(OSDictionary, parsedXML);
    }
    if (!prelinkInfoDict) {
        const char * errorCString = "(unknown error)";
        
        if (errorString && errorString->getCStringNoCopy()) {
            errorCString = errorString->getCStringNoCopy();
        } else if (parsedXML) {
            errorCString = "not a dictionary";
        }
        OSKextLog(/* kext */ NULL, kOSKextLogErrorLevel | kOSKextLogArchiveFlag,
            "Error unserializing prelink plist: %s.", errorCString);
        goto finish;
    }

    infoDictArray = OSDynamicCast(OSArray, 
        prelinkInfoDict->getObject(kPrelinkInfoDictionaryKey));
    if (!infoDictArray) {
        OSKextLog(/* kext */ NULL, kOSKextLogErrorLevel | kOSKextLogArchiveFlag,
            "The prelinked kernel has no kext info dictionaries");
        goto finish;
    }

   /* Create OSKext objects for each info dictionary.
    */
    for (i = 0; i < infoDictArray->getCount(); ++i) {
        infoDict = OSDynamicCast(OSDictionary, infoDictArray->getObject(i));
        if (!infoDict) {
            OSKextLog(/* kext */ NULL,
                kOSKextLogErrorLevel |
                kOSKextLogDirectoryScanFlag | kOSKextLogArchiveFlag,
                "Can't find info dictionary for prelinked kext #%d.", i);
            continue;
        }

       /* Create the kext for the entry, then release it, because the
        * kext system keeps them around until explicitly removed.
        * Any creation/registration failures are already logged for us.
        */
        OSKext * newKext = OSKext::withPrelinkedInfoDict(infoDict);
        OSSafeReleaseNULL(newKext);
    }
    
    /* Get all of the personalities for kexts that were not prelinked and
     * add them to the catalogue.
     */
    personalitiesArray = OSDynamicCast(OSArray,
        prelinkInfoDict->getObject(kPrelinkPersonalitiesKey));
    if (!personalitiesArray) {
        OSKextLog(/* kext */ NULL, kOSKextLogErrorLevel | kOSKextLogArchiveFlag,
            "The prelinked kernel has no personalities array");
        goto finish;
    }

    if (personalitiesArray->getCount()) {
        OSKext::setPrelinkedPersonalities(personalitiesArray);
    }

   /* Store the number of prelinked kexts in the registry so we can tell
    * when the system has been started from a prelinked kernel.
    */
    registryRoot = IORegistryEntry::getRegistryRoot();
    assert(registryRoot);

    prelinkCountObj = OSNumber::withNumber(
        (unsigned long long)infoDictArray->getCount(),
        8 * sizeof(uint32_t));
    assert(prelinkCountObj);
    if (prelinkCountObj) {
        registryRoot->setProperty(kOSPrelinkKextCountKey, prelinkCountObj);
    }

    OSSafeReleaseNULL(prelinkCountObj);
    prelinkCountObj = OSNumber::withNumber(
        (unsigned long long)personalitiesArray->getCount(),
        8 * sizeof(uint32_t));
    assert(prelinkCountObj);
    if (prelinkCountObj) {
        registryRoot->setProperty(kOSPrelinkPersonalityCountKey, prelinkCountObj);
    }

    OSKextLog(/* kext */ NULL,
        kOSKextLogProgressLevel |
        kOSKextLogGeneralFlag | kOSKextLogKextBookkeepingFlag |
        kOSKextLogDirectoryScanFlag | kOSKextLogArchiveFlag,
        "%u prelinked kexts, and %u additional personalities.", 
        infoDictArray->getCount(), personalitiesArray->getCount());

#if __LP64__
   /* On LP64 systems, kexts are copied to their own special VM region
    * during OSKext init time, so we can free the whole segment now.
    */
    ml_static_mfree((vm_offset_t) prelinkData, prelinkLength);
#endif /* __LP64__ */

   /* Free the link state segment; kexts have copied out what they need.
    */
    prelinkLinkStateSegment = getsegbyname(kPrelinkLinkStateSegment);
    if (prelinkLinkStateSegment) {
        ml_static_mfree((vm_offset_t)prelinkLinkStateSegment->vmaddr,
            (vm_size_t)prelinkLinkStateSegment->vmsize);
    }

   /* Free the prelink info segment; we're done with it.
    */
    prelinkInfoSegment = getsegbyname(kPrelinkInfoSegment);
    if (prelinkInfoSegment) {
        ml_static_mfree((vm_offset_t)prelinkInfoSegment->vmaddr,
            (vm_size_t)prelinkInfoSegment->vmsize);
    }

finish:
    OSSafeRelease(errorString);
    OSSafeRelease(parsedXML);
    OSSafeRelease(theKernel);
    OSSafeRelease(prelinkCountObj);
    return;
}
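Both failure paths in the unserialization step above report through the same
shape: parse, downcast, and fall back through the error string. As a minimal
sketch (kernel C++, using only the libkern calls that already appear in this
function; the helper name is hypothetical, not part of the source), the
pattern factors out like this:

static OSDictionary *
copyUnserializedDict(const char * xmlBuffer)
{
    OSString     * errorString = NULL;  // must release
    OSObject     * parsedXML   = OSUnserializeXML(xmlBuffer, &errorString);
    OSDictionary * dict        = OSDynamicCast(OSDictionary, parsedXML);

    if (!dict) {
        const char * errorCString = "(unknown error)";

        if (errorString && errorString->getCStringNoCopy()) {
            errorCString = errorString->getCStringNoCopy();
        } else if (parsedXML) {
            errorCString = "not a dictionary";
        }
        OSKextLog(/* kext */ NULL, kOSKextLogErrorLevel | kOSKextLogArchiveFlag,
            "Error unserializing plist: %s.", errorCString);
        OSSafeRelease(parsedXML);   // drop a non-dictionary result
    }
    OSSafeRelease(errorString);
    return dict;                    // retained; caller must release
}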