Code Example #1
File: bsd_vm.c Project: SbIm/xnu-env
kern_return_t
vnode_pager_map(
	memory_object_t		mem_obj,
	vm_prot_t		prot)
{
	vnode_pager_t		vnode_object;
	int			ret;
	kern_return_t		kr;

	PAGER_DEBUG(PAGER_ALL, ("vnode_pager_map: %p %x\n", mem_obj, prot));

	vnode_object = vnode_pager_lookup(mem_obj);

	ret = ubc_map(vnode_object->vnode_handle, prot);

	if (ret != 0) {
		kr = KERN_FAILURE;
	} else {
		kr = KERN_SUCCESS;
	}

	return kr;
}
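
For reference, vnode_pager_lookup() recovers the vnode_pager_t that backs the memory_object_t, and ubc_map() reports an errno-style int, which this routine collapses into a two-valued kern_return_t. A minimal stand-alone sketch of that errno-to-kern_return_t idiom (fake_ubc_map() is a hypothetical stand-in, not an XNU function):

#include <mach/kern_return.h>

/* hypothetical stand-in for ubc_map(): returns 0 on success, an errno otherwise */
static int fake_ubc_map(int fail) { return fail ? 22 /* EINVAL */ : 0; }

static kern_return_t map_result(int fail)
{
	/* collapse any nonzero errno into KERN_FAILURE, as vnode_pager_map() does */
	return (fake_ubc_map(fail) != 0) ? KERN_FAILURE : KERN_SUCCESS;
}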
Code Example #2
/*
 * XXX Internally, we use VM_PROT_* somewhat interchangeably, but the correct
 * XXX usage is PROT_* from an interface perspective.  Thus the values of
 * XXX VM_PROT_* and PROT_* need to correspond.
 */
int
mmap(proc_t p, struct mmap_args *uap, user_addr_t *retval)
{
	/*
	 *	Map in special device (must be SHARED) or file
	 */
	struct fileproc *fp;
	register struct		vnode *vp;
	int			flags;
	int			prot, file_prot;
	int			err=0;
	vm_map_t		user_map;
	kern_return_t		result;
	mach_vm_offset_t	user_addr;
	mach_vm_size_t		user_size;
	vm_object_offset_t	pageoff;
	vm_object_offset_t	file_pos;
	int			alloc_flags=0;
	boolean_t		docow;
	vm_prot_t		maxprot;
	void 			*handle;
	vm_pager_t		pager;
	int 			mapanon=0;
	int 			fpref=0;
	int error =0;
	int fd = uap->fd;

	user_addr = (mach_vm_offset_t)uap->addr;
	user_size = (mach_vm_size_t) uap->len;

	AUDIT_ARG(addr, user_addr);
	AUDIT_ARG(len, user_size);
	AUDIT_ARG(fd, uap->fd);

	prot = (uap->prot & VM_PROT_ALL);
#if 3777787
	/*
	 * Since the hardware currently does not support writing without
	 * read-before-write, or execution-without-read, if the request is
	 * for write or execute access, we must imply read access as well;
	 * otherwise programs expecting this to work will fail to operate.
	 */
	if (prot & (VM_PROT_EXECUTE | VM_PROT_WRITE))
		prot |= VM_PROT_READ;
#endif	/* radar 3777787 */

	flags = uap->flags;
	vp = NULLVP;

	/*
	 * The vm code does not have prototypes & the compiler doesn't do
	 * the right thing when you cast a 64-bit value and pass it in a
	 * function call. So here it is.
	 */
	file_pos = (vm_object_offset_t)uap->pos;


	/* make sure mapping fits into numeric range etc */
	if (file_pos + user_size > (vm_object_offset_t)-PAGE_SIZE_64)
		return (EINVAL);

	/*
	 * Align the file position to a page boundary,
	 * and save its page offset component.
	 */
	pageoff = (file_pos & PAGE_MASK);
	file_pos -= (vm_object_offset_t)pageoff;


	/* Adjust size for rounding (on both ends). */
	user_size += pageoff;			/* low end... */
	user_size = mach_vm_round_page(user_size);	/* hi end */


	/*
	 * Check for illegal addresses.  Watch out for address wrap... Note
	 * that VM_*_ADDRESS are not constants due to casts (argh).
	 */
	if (flags & MAP_FIXED) {
		/*
		 * The specified address must have the same remainder
		 * as the file offset taken modulo PAGE_SIZE, so it
		 * should be aligned after adjustment by pageoff.
		 */
		user_addr -= pageoff;
		if (user_addr & PAGE_MASK)
			return (EINVAL);
	}
#ifdef notyet
	/* We do not have APIs to get this info; need to wait till then */
	/*
	 * XXX for non-fixed mappings where no hint is provided or
	 * the hint would fall in the potential heap space,
	 * place it after the end of the largest possible heap.
	 *
	 * There should really be a pmap call to determine a reasonable
	 * location.
	 */
	else if (addr < mach_vm_round_page(p->p_vmspace->vm_daddr + MAXDSIZ))
		addr = mach_vm_round_page(p->p_vmspace->vm_daddr + MAXDSIZ);

#endif

	alloc_flags = 0;

	if (flags & MAP_ANON) {
		/*
		 * Mapping blank space is trivial.  Use positive fds as the alias
		 * value for memory tracking. 
		 */
		if (fd != -1) {
			/*
			 * Use "fd" to pass (some) Mach VM allocation flags,
			 * (see the VM_FLAGS_* definitions).
			 */
			alloc_flags = fd & (VM_FLAGS_ALIAS_MASK |
					    VM_FLAGS_PURGABLE);
			if (alloc_flags != fd) {
				/* reject if there are any extra flags */
				return EINVAL;
			}
		}
			
		handle = NULL;
		maxprot = VM_PROT_ALL;
		file_pos = 0;
		mapanon = 1;
	} else {
		struct vnode_attr va;
		vfs_context_t ctx = vfs_context_current();

		/*
		 * Mapping file, get fp for validation. Obtain vnode and make
		 * sure it is of appropriate type.
		 */
		err = fp_lookup(p, fd, &fp, 0);
		if (err)
			return(err);
		fpref = 1;
		if(fp->f_fglob->fg_type == DTYPE_PSXSHM) {
			uap->addr = (user_addr_t)user_addr;
			uap->len = (user_size_t)user_size;
			uap->prot = prot;
			uap->flags = flags;
			uap->pos = file_pos;
			error = pshm_mmap(p, uap, retval, fp, (off_t)pageoff);
			goto bad;
		}

		if (fp->f_fglob->fg_type != DTYPE_VNODE) {
			error = EINVAL;
			goto bad;
		}
		vp = (struct vnode *)fp->f_fglob->fg_data;
		error = vnode_getwithref(vp);
		if(error != 0)
			goto bad;

		if (vp->v_type != VREG && vp->v_type != VCHR) {
			(void)vnode_put(vp);
			error = EINVAL;
			goto bad;
		}

		AUDIT_ARG(vnpath, vp, ARG_VNODE1);
		
		/*
		 * POSIX: mmap needs to update access time for mapped files
		 */
		if ((vnode_vfsvisflags(vp) & MNT_NOATIME) == 0) {
			VATTR_INIT(&va);
			nanotime(&va.va_access_time);
			VATTR_SET_ACTIVE(&va, va_access_time);
			vnode_setattr(vp, &va, ctx);
		}
		
		/*
		 * XXX hack to handle use of /dev/zero to map anon memory (ala
		 * SunOS).
		 */
		if (vp->v_type == VCHR || vp->v_type == VSTR) {
			(void)vnode_put(vp);
			error = ENODEV;
			goto bad;
		} else {
			/*
			 * Ensure that file and memory protections are
			 * compatible.  Note that we only worry about
			 * writability if mapping is shared; in this case,
			 * current and max prot are dictated by the open file.
			 * XXX use the vnode instead?  Problem is: what
			 * credentials do we use for determination? What if
			 * proc does a setuid?
			 */
			maxprot = VM_PROT_EXECUTE;	/* ??? */
			if (fp->f_fglob->fg_flag & FREAD)
				maxprot |= VM_PROT_READ;
			else if (prot & PROT_READ) {
				(void)vnode_put(vp);
				error = EACCES;
				goto bad;
			}
			/*
			 * If we are sharing potential changes (either via
			 * MAP_SHARED or via the implicit sharing of character
			 * device mappings), and we are trying to get write
			 * permission although we opened it without asking
			 * for it, bail out. 
			 */

			if ((flags & MAP_SHARED) != 0) {
				if ((fp->f_fglob->fg_flag & FWRITE) != 0) {
					/*
					 * check for write access
					 *
					 * Note that we already made this check when granting FWRITE
					 * against the file, so it seems redundant here.
					 */
					error = vnode_authorize(vp, NULL, KAUTH_VNODE_CHECKIMMUTABLE, ctx);

					/* if not granted for any reason, but we wanted it, bad */
					if ((prot & PROT_WRITE) && (error != 0)) {
						vnode_put(vp);
						goto bad;
					}

					/* if writable, remember */
					if (error == 0)
						maxprot |= VM_PROT_WRITE;

				} else if ((prot & PROT_WRITE) != 0) {
					(void)vnode_put(vp);
					error = EACCES;
					goto bad;
				}
			} else
				maxprot |= VM_PROT_WRITE;

			handle = (void *)vp;
#if CONFIG_MACF
			error = mac_file_check_mmap(vfs_context_ucred(ctx),
			    fp->f_fglob, prot, flags, &maxprot);
			if (error) {
				(void)vnode_put(vp);
				goto bad;
			}
#endif /* MAC */
		}
	}

	if (user_size == 0)  {
		if (!mapanon)
			(void)vnode_put(vp);
		error = 0;
		goto bad;
	}

	/*
	 *	We bend a little - round the start and end addresses
	 *	to the nearest page boundary.
	 */
	user_size = mach_vm_round_page(user_size);

	if (file_pos & PAGE_MASK_64) {
		if (!mapanon)
			(void)vnode_put(vp);
		error = EINVAL;
		goto bad;
	}

	user_map = current_map();

	if ((flags & MAP_FIXED) == 0) {
		alloc_flags |= VM_FLAGS_ANYWHERE;
		user_addr = mach_vm_round_page(user_addr);
	} else {
		if (user_addr != mach_vm_trunc_page(user_addr)) {
		        if (!mapanon)
			        (void)vnode_put(vp);
			error = EINVAL;
			goto bad;
		}
		/*
		 * mmap(MAP_FIXED) will replace any existing mappings in the
		 * specified range, if the new mapping is successful.
		 * If we just deallocate the specified address range here,
		 * another thread might jump in and allocate memory in that
		 * range before we get a chance to establish the new mapping,
		 * and we won't have a chance to restore the old mappings.
		 * So we use VM_FLAGS_OVERWRITE to let Mach VM know that it
		 * has to deallocate the existing mappings and establish the
		 * new ones atomically.
		 */
		alloc_flags |= VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE;
	}

	if (flags & MAP_NOCACHE)
		alloc_flags |= VM_FLAGS_NO_CACHE;

	/*
	 * Lookup/allocate object.
	 */
	if (handle == NULL) {
		pager = NULL;
#ifdef notyet
/* Hmm .. */
#if defined(VM_PROT_READ_IS_EXEC)
		if (prot & VM_PROT_READ)
			prot |= VM_PROT_EXECUTE;
		if (maxprot & VM_PROT_READ)
			maxprot |= VM_PROT_EXECUTE;
#endif
#endif

#if 3777787
		if (prot & (VM_PROT_EXECUTE | VM_PROT_WRITE))
			prot |= VM_PROT_READ;
		if (maxprot & (VM_PROT_EXECUTE | VM_PROT_WRITE))
			maxprot |= VM_PROT_READ;
#endif	/* radar 3777787 */

		result = vm_map_enter_mem_object(user_map,
						 &user_addr, user_size,
						 0, alloc_flags,
						 IPC_PORT_NULL, 0, FALSE,
						 prot, maxprot,
						 (flags & MAP_SHARED) ?
						 VM_INHERIT_SHARE : 
						 VM_INHERIT_DEFAULT);
		if (result != KERN_SUCCESS)
			goto out;
	} else {
		pager = (vm_pager_t)ubc_getpager(vp);
		
		if (pager == NULL) {
			(void)vnode_put(vp);
			error = ENOMEM;
			goto bad;
		}

		/*
		 *  Set credentials:
		 *	FIXME: if we're writing the file we need a way to
		 *      ensure that someone doesn't replace our R/W creds
		 * 	with ones that only work for read.
		 */

		ubc_setthreadcred(vp, p, current_thread());
		docow = FALSE;
		if ((flags & (MAP_ANON|MAP_SHARED)) == 0) {
			docow = TRUE;
		}

#ifdef notyet
/* Hmm .. */
#if defined(VM_PROT_READ_IS_EXEC)
		if (prot & VM_PROT_READ)
			prot |= VM_PROT_EXECUTE;
		if (maxprot & VM_PROT_READ)
			maxprot |= VM_PROT_EXECUTE;
#endif
#endif /* notyet */

#if 3777787
		if (prot & (VM_PROT_EXECUTE | VM_PROT_WRITE))
			prot |= VM_PROT_READ;
		if (maxprot & (VM_PROT_EXECUTE | VM_PROT_WRITE))
			maxprot |= VM_PROT_READ;
#endif	/* radar 3777787 */

		result = vm_map_enter_mem_object(user_map,
						 &user_addr, user_size,
						 0, alloc_flags,
						 (ipc_port_t)pager, file_pos,
						 docow, prot, maxprot, 
						 (flags & MAP_SHARED) ?
						 VM_INHERIT_SHARE : 
						 VM_INHERIT_DEFAULT);

		if (result != KERN_SUCCESS) {
			(void)vnode_put(vp);
			goto out;
		}

		file_prot = prot & (PROT_READ | PROT_WRITE | PROT_EXEC);
		if (docow) {
			/* private mapping: won't write to the file */
			file_prot &= ~PROT_WRITE;
		}
		(void) ubc_map(vp, file_prot);
	}

	if (!mapanon)
		(void)vnode_put(vp);

out:
	switch (result) {
	case KERN_SUCCESS:
		*retval = user_addr + pageoff;
		error = 0;
		break;
	case KERN_INVALID_ADDRESS:
	case KERN_NO_SPACE:
		error =  ENOMEM;
		break;
	case KERN_PROTECTION_FAILURE:
		error =  EACCES;
		break;
	default:
		error =  EINVAL;
		break;
	}
bad:
	if (fpref)
		fp_drop(p, fd, fp, 0);

	KERNEL_DEBUG_CONSTANT((BSDDBG_CODE(DBG_BSD_SC_EXTENDED_INFO, SYS_mmap) | DBG_FUNC_NONE), fd, (uint32_t)(*retval), (uint32_t)user_size, error, 0);
	KERNEL_DEBUG_CONSTANT((BSDDBG_CODE(DBG_BSD_SC_EXTENDED_INFO2, SYS_mmap) | DBG_FUNC_NONE), (uint32_t)(*retval >> 32), (uint32_t)(user_size >> 32),
			      (uint32_t)(file_pos >> 32), (uint32_t)file_pos, 0);

	return(error);
}
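
A non-obvious detail above: with MAP_ANON the fd argument is not a descriptor at all; the kernel reinterprets it as Mach VM_FLAGS_* bits (the alloc_flags = fd & (VM_FLAGS_ALIAS_MASK | VM_FLAGS_PURGABLE) branch) and rejects anything else with EINVAL. This is what lets user code tag anonymous memory for tools like vmmap. A minimal user-space sketch, assuming Darwin's VM_MAKE_TAG() convention from <mach/vm_statistics.h> (the tag choice is arbitrary):

#include <sys/mman.h>
#include <mach/vm_statistics.h>
#include <stdio.h>

int main(void)
{
	/* pass an alias tag through the fd argument of an anonymous mapping */
	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		       MAP_ANON | MAP_PRIVATE,
		       VM_MAKE_TAG(VM_MEMORY_APPLICATION_SPECIFIC_1), 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	printf("anonymous page at %p\n", p);
	munmap(p, 4096);
	return 0;
}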
Code Example #3
kern_return_t
map_fd_funneled(
	int			fd,
	vm_object_offset_t	offset,
	vm_offset_t		*va,
	boolean_t		findspace,
	vm_size_t		size)
{
	kern_return_t	result;
	struct fileproc	*fp;
	struct vnode	*vp;
	void *	pager;
	vm_offset_t	map_addr=0;
	vm_size_t	map_size;
	int		err=0;
	vm_map_t	my_map;
	proc_t		p = current_proc();
	struct vnode_attr vattr;

	/*
	 *	Find the inode; verify that it's a regular file.
	 */

	err = fp_lookup(p, fd, &fp, 0);
	if (err)
		return(err);
	
	if (fp->f_fglob->fg_type != DTYPE_VNODE){
		err = KERN_INVALID_ARGUMENT;
		goto bad;
	}

	if (!(fp->f_fglob->fg_flag & FREAD)) {
		err = KERN_PROTECTION_FAILURE;
		goto bad;
	}

	vp = (struct vnode *)fp->f_fglob->fg_data;
	err = vnode_getwithref(vp);
	if(err != 0) 
		goto bad;

	if (vp->v_type != VREG) {
		(void)vnode_put(vp);
		err = KERN_INVALID_ARGUMENT;
		goto bad;
	}

	AUDIT_ARG(vnpath, vp, ARG_VNODE1);

	/*
	 * POSIX: mmap needs to update access time for mapped files
	 */
	if ((vnode_vfsvisflags(vp) & MNT_NOATIME) == 0) {
		VATTR_INIT(&vattr);
		nanotime(&vattr.va_access_time);
		VATTR_SET_ACTIVE(&vattr, va_access_time);
		vnode_setattr(vp, &vattr, vfs_context_current());
	}
	
	if (offset & PAGE_MASK_64) {
		printf("map_fd: file offset not page aligned(%d : %s)\n",p->p_pid, p->p_comm);
		(void)vnode_put(vp);
		err = KERN_INVALID_ARGUMENT;
		goto bad;
	}
	map_size = round_page(size);

	/*
	 * Allow user to map in a zero length file.
	 */
	if (size == 0) {
		(void)vnode_put(vp);
		err = KERN_SUCCESS;
		goto bad;
	}
	/*
	 *	Map in the file.
	 */
	pager = (void *)ubc_getpager(vp);
	if (pager == NULL) {
		(void)vnode_put(vp);
		err = KERN_FAILURE;
		goto bad;
	}


	my_map = current_map();

	result = vm_map_64(
			my_map,
			&map_addr, map_size, (vm_offset_t)0, 
			VM_FLAGS_ANYWHERE, pager, offset, TRUE,
			VM_PROT_DEFAULT, VM_PROT_ALL,
			VM_INHERIT_DEFAULT);
	if (result != KERN_SUCCESS) {
		(void)vnode_put(vp);
		err = result;
		goto bad;
	}


	if (!findspace) {
		vm_offset_t	dst_addr;
		vm_map_copy_t	tmp;

		if (copyin(CAST_USER_ADDR_T(va), &dst_addr, sizeof (dst_addr))	||
					trunc_page_32(dst_addr) != dst_addr) {
			(void) vm_map_remove(
					my_map,
					map_addr, map_addr + map_size,
					VM_MAP_NO_FLAGS);
			(void)vnode_put(vp);
			err = KERN_INVALID_ADDRESS;
			goto bad;
		}

		result = vm_map_copyin(my_map, (vm_map_address_t)map_addr,
				       (vm_map_size_t)map_size, TRUE, &tmp);
		if (result != KERN_SUCCESS) {
			
			(void) vm_map_remove(my_map, vm_map_trunc_page(map_addr),
					vm_map_round_page(map_addr + map_size),
					VM_MAP_NO_FLAGS);
			(void)vnode_put(vp);
			err = result;
			goto bad;
		}

		result = vm_map_copy_overwrite(my_map,
					(vm_map_address_t)dst_addr, tmp, FALSE);
		if (result != KERN_SUCCESS) {
			vm_map_copy_discard(tmp);
			(void)vnode_put(vp);
			err = result;
			goto bad;
		}
	} else {
		if (copyout(&map_addr, CAST_USER_ADDR_T(va), sizeof (map_addr))) {
			(void) vm_map_remove(my_map, vm_map_trunc_page(map_addr),
					vm_map_round_page(map_addr + map_size),
					VM_MAP_NO_FLAGS);
			(void)vnode_put(vp);
			err = KERN_INVALID_ADDRESS;
			goto bad;
		}
	}

	ubc_setthreadcred(vp, current_proc(), current_thread());
	(void)ubc_map(vp, (PROT_READ | PROT_EXEC));
	(void)vnode_put(vp);
	err = 0;
bad:
	fp_drop(p, fd, fp, 0);
	return (err);
}
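
The findspace flag selects between two calling conventions: TRUE means the kernel picks an address and copies it out to *va; FALSE means *va already holds the destination address, which is then populated via vm_map_copyin()/vm_map_copy_overwrite(). map_fd() has long been superseded by mmap(); a rough user-space equivalent of the findspace == TRUE path looks like the sketch below (the file path is hypothetical, and PROT_READ | PROT_EXEC mirrors the ubc_map() call at the end of the routine):

#include <sys/mman.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/tmp/example.bin", O_RDONLY);	/* hypothetical file */
	if (fd < 0) { perror("open"); return 1; }

	/* findspace == TRUE analogue: NULL hint, the kernel chooses the address */
	void *va = mmap(NULL, 4096, PROT_READ | PROT_EXEC, MAP_PRIVATE, fd, 0);
	if (va == MAP_FAILED) { perror("mmap"); close(fd); return 1; }

	printf("mapped at %p\n", va);
	munmap(va, 4096);
	close(fd);
	return 0;
}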
Code Example #4
/*
 * shared_region_map_np()
 *
 * This system call is intended for dyld.
 *
 * dyld uses this to map a shared cache file into a shared region.
 * This is usually done only the first time a shared cache is needed.
 * Subsequent processes will just use the populated shared region without
 * requiring any further setup.
 */
int
shared_region_map_np(
	struct proc				*p,
	struct shared_region_map_np_args	*uap,
	__unused int				*retvalp)
{
	int				error;
	kern_return_t			kr;
	int				fd;
	struct fileproc			*fp;
	struct vnode			*vp, *root_vp;
	struct vnode_attr		va;
	off_t				fs;
	memory_object_size_t		file_size;
	user_addr_t			user_mappings;
	struct shared_file_mapping_np	*mappings;
#define SFM_MAX_STACK	8
	struct shared_file_mapping_np	stack_mappings[SFM_MAX_STACK];
	unsigned int			mappings_count;
	vm_size_t			mappings_size;
	memory_object_control_t		file_control;
	struct vm_shared_region		*shared_region;

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: %p [%d(%s)] -> map\n",
		 current_thread(), p->p_pid, p->p_comm));

	shared_region = NULL;
	mappings_count = 0;
	mappings_size = 0;
	mappings = NULL;
	fp = NULL;
	vp = NULL;

	/* get file descriptor for shared region cache file */
	fd = uap->fd;

	/* get file structure from file descriptor */
	error = fp_lookup(p, fd, &fp, 0);
	if (error) {
		SHARED_REGION_TRACE_ERROR(
			("shared_region: %p [%d(%s)] map: "
			 "fd=%d lookup failed (error=%d)\n",
			 current_thread(), p->p_pid, p->p_comm, fd, error));
		goto done;
	}

	/* make sure we're attempting to map a vnode */
	if (fp->f_fglob->fg_type != DTYPE_VNODE) {
		SHARED_REGION_TRACE_ERROR(
			("shared_region: %p [%d(%s)] map: "
			 "fd=%d not a vnode (type=%d)\n",
			 current_thread(), p->p_pid, p->p_comm,
			 fd, fp->f_fglob->fg_type));
		error = EINVAL;
		goto done;
	}

	/* we need at least read permission on the file */
	if (! (fp->f_fglob->fg_flag & FREAD)) {
		SHARED_REGION_TRACE_ERROR(
			("shared_region: %p [%d(%s)] map: "
			 "fd=%d not readable\n",
			 current_thread(), p->p_pid, p->p_comm, fd));
		error = EPERM;
		goto done;
	}

	/* get vnode from file structure */
	error = vnode_getwithref((vnode_t) fp->f_fglob->fg_data);
	if (error) {
		SHARED_REGION_TRACE_ERROR(
			("shared_region: %p [%d(%s)] map: "
			 "fd=%d getwithref failed (error=%d)\n",
			 current_thread(), p->p_pid, p->p_comm, fd, error));
		goto done;
	}
	vp = (struct vnode *) fp->f_fglob->fg_data;

	/* make sure the vnode is a regular file */
	if (vp->v_type != VREG) {
		SHARED_REGION_TRACE_ERROR(
			("shared_region: %p [%d(%s)] map(%p:'%s'): "
			 "not a file (type=%d)\n",
			 current_thread(), p->p_pid, p->p_comm,
			 vp, vp->v_name, vp->v_type));
		error = EINVAL;
		goto done;
	}

	/* make sure vnode is on the process's root volume */
	root_vp = p->p_fd->fd_rdir;
	if (root_vp == NULL) {
		root_vp = rootvnode;
	}
	if (vp->v_mount != root_vp->v_mount) {
		SHARED_REGION_TRACE_ERROR(
			("shared_region: %p [%d(%s)] map(%p:'%s'): "
			 "not on process's root volume\n",
			 current_thread(), p->p_pid, p->p_comm,
			 vp, vp->v_name));
		error = EPERM;
		goto done;
	}

	/* make sure vnode is owned by "root" */
	VATTR_INIT(&va);
	VATTR_WANTED(&va, va_uid);
	error = vnode_getattr(vp, &va, vfs_context_current());
	if (error) {
		SHARED_REGION_TRACE_ERROR(
			("shared_region: %p [%d(%s)] map(%p:'%s'): "
			 "vnode_getattr(%p) failed (error=%d)\n",
			 current_thread(), p->p_pid, p->p_comm,
			 vp, vp->v_name, vp, error));
		goto done;
	}
	if (va.va_uid != 0) {
		SHARED_REGION_TRACE_ERROR(
			("shared_region: %p [%d(%s)] map(%p:'%s'): "
			 "owned by uid=%d instead of 0\n",
			 current_thread(), p->p_pid, p->p_comm,
			 vp, vp->v_name, va.va_uid));
		error = EPERM;
		goto done;
	}

	/* get vnode size */
	error = vnode_size(vp, &fs, vfs_context_current());
	if (error) {
		SHARED_REGION_TRACE_ERROR(
			("shared_region: %p [%d(%s)] map(%p:'%s'): "
			 "vnode_size(%p) failed (error=%d)\n",
			 current_thread(), p->p_pid, p->p_comm,
			 vp, vp->v_name, vp, error));
		goto done;
	}
	file_size = fs;

	/* get the file's memory object handle */
	file_control = ubc_getobject(vp, UBC_HOLDOBJECT);
	if (file_control == MEMORY_OBJECT_CONTROL_NULL) {
		SHARED_REGION_TRACE_ERROR(
			("shared_region: %p [%d(%s)] map(%p:'%s'): "
			 "no memory object\n",
			 current_thread(), p->p_pid, p->p_comm,
			 vp, vp->v_name));
		error = EINVAL;
		goto done;
	}
			 
	/* get the list of mappings the caller wants us to establish */
	mappings_count = uap->count;	/* number of mappings */
	mappings_size = (vm_size_t) (mappings_count * sizeof (mappings[0]));
	if (mappings_count == 0) {
		SHARED_REGION_TRACE_INFO(
			("shared_region: %p [%d(%s)] map(%p:'%s'): "
			 "no mappings\n",
			 current_thread(), p->p_pid, p->p_comm,
			 vp, vp->v_name));
		error = 0;	/* no mappings: we're done ! */
		goto done;
	} else if (mappings_count <= SFM_MAX_STACK) {
		mappings = &stack_mappings[0];
	} else {
		SHARED_REGION_TRACE_ERROR(
			("shared_region: %p [%d(%s)] map(%p:'%s'): "
			 "too many mappings (%d)\n",
			 current_thread(), p->p_pid, p->p_comm,
			 vp, vp->v_name, mappings_count));
		error = EINVAL;
		goto done;
	}

	user_mappings = uap->mappings;	/* the mappings, in user space */
	error = copyin(user_mappings,
		       mappings,
		       mappings_size);
	if (error) {
		SHARED_REGION_TRACE_ERROR(
			("shared_region: %p [%d(%s)] map(%p:'%s'): "
			 "copyin(0x%llx, %d) failed (error=%d)\n",
			 current_thread(), p->p_pid, p->p_comm,
			 vp, vp->v_name, (uint64_t)user_mappings, mappings_count, error));
		goto done;
	}

	/* get the process's shared region (setup in vm_map_exec()) */
	shared_region = vm_shared_region_get(current_task());
	if (shared_region == NULL) {
		SHARED_REGION_TRACE_ERROR(
			("shared_region: %p [%d(%s)] map(%p:'%s'): "
			 "no shared region\n",
			 current_thread(), p->p_pid, p->p_comm,
			 vp, vp->v_name));
		goto done;
	}

	/* map the file into that shared region's submap */
	kr = vm_shared_region_map_file(shared_region,
				       mappings_count,
				       mappings,
				       file_control,
				       file_size,
				       (void *) p->p_fd->fd_rdir);
	if (kr != KERN_SUCCESS) {
		SHARED_REGION_TRACE_ERROR(
			("shared_region: %p [%d(%s)] map(%p:'%s'): "
			 "vm_shared_region_map_file() failed kr=0x%x\n",
			 current_thread(), p->p_pid, p->p_comm,
			 vp, vp->v_name, kr));
		switch (kr) {
		case KERN_INVALID_ADDRESS:
			error = EFAULT;
			break;
		case KERN_PROTECTION_FAILURE:
			error = EPERM;
			break;
		case KERN_NO_SPACE:
			error = ENOMEM;
			break;
		case KERN_FAILURE:
		case KERN_INVALID_ARGUMENT:
		default:
			error = EINVAL;
			break;
		}
		goto done;
	}

	/*
	 * The mapping was successful.  Let the buffer cache know
	 * that we've mapped that file with these protections.  This
	 * prevents the vnode from getting recycled while it's mapped.
	 */
	(void) ubc_map(vp, VM_PROT_READ);
	error = 0;

	/* update the vnode's access time */
	if (! (vnode_vfsvisflags(vp) & MNT_NOATIME)) {
		VATTR_INIT(&va);
		nanotime(&va.va_access_time);
		VATTR_SET_ACTIVE(&va, va_access_time);
		vnode_setattr(vp, &va, vfs_context_current());
	}

	if (p->p_flag & P_NOSHLIB) {
		/* signal that this process is now using split libraries */
		OSBitAndAtomic(~((uint32_t)P_NOSHLIB), (UInt32 *)&p->p_flag);
	}

done:
	if (vp != NULL) {
		/*
		 * release the vnode...
		 * ubc_map() still holds it for us in the non-error case
		 */
		(void) vnode_put(vp);
		vp = NULL;
	}
	if (fp != NULL) {
		/* release the file descriptor */
		fp_drop(p, fd, fp, 0);
		fp = NULL;
	}

	if (shared_region != NULL) {
		vm_shared_region_deallocate(shared_region);
	}

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: %p [%d(%s)] <- map\n",
		 current_thread(), p->p_pid, p->p_comm));

	return error;
}
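
The mappings copied in above form an array of struct shared_file_mapping_np (declared in <mach/shared_region.h>), capped at SFM_MAX_STACK (8) entries. A sketch of what a dyld-style caller might populate before issuing the syscall; the sizes, file offsets, and the use of SHARED_REGION_BASE_X86_64 are assumptions for illustration, and in practice only dyld makes this call:

#include <mach/shared_region.h>
#include <mach/vm_prot.h>

/* hypothetical mapping list: one r-x text segment and one r/w data
 * segment of a shared cache file; offsets and sizes are made up */
static struct shared_file_mapping_np mappings[2] = {
	{
		.sfm_address	 = SHARED_REGION_BASE_X86_64,
		.sfm_size	 = 0x200000,
		.sfm_file_offset = 0,
		.sfm_max_prot	 = VM_PROT_READ | VM_PROT_EXECUTE,
		.sfm_init_prot	 = VM_PROT_READ | VM_PROT_EXECUTE,
	},
	{
		.sfm_address	 = SHARED_REGION_BASE_X86_64 + 0x200000,
		.sfm_size	 = 0x100000,
		.sfm_file_offset = 0x200000,
		.sfm_max_prot	 = VM_PROT_READ | VM_PROT_WRITE,
		.sfm_init_prot	 = VM_PROT_READ | VM_PROT_WRITE,
	},
};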
Code Example #5
/*
 * The file size of a mach-o file is limited to 32 bits; this is because
 * this is the limit on the kalloc() of enough bytes for a mach_header and
 * the contents of its sizeofcmds, which is currently constrained to 32
 * bits in the file format itself.  We read into the kernel buffer the
 * commands section, and then parse it in order to parse the mach-o file
 * format load_command segment(s).  We are only interested in a subset of
 * the total set of possible commands.
 */
static
load_return_t
parse_machfile(
    struct vnode 		*vp,
    vm_map_t		map,
    thread_t		thread,
    struct mach_header	*header,
    off_t			file_offset,
    off_t			macho_size,
    int			depth,
    load_result_t		*result
)
{
    uint32_t		ncmds;
    struct load_command	*lcp;
    struct dylinker_command	*dlp = 0;
    integer_t		dlarchbits = 0;
    void *			pager;
    load_return_t		ret = LOAD_SUCCESS;
    caddr_t			addr;
    void *			kl_addr;
    vm_size_t		size,kl_size;
    size_t			offset;
    size_t			oldoffset;	/* for overflow check */
    int			pass;
    proc_t			p = current_proc();		/* XXXX */
    int			error;
    int resid=0;
    task_t task;
    size_t			mach_header_sz = sizeof(struct mach_header);
    boolean_t		abi64;
    boolean_t		got_code_signatures = FALSE;

    if (header->magic == MH_MAGIC_64 ||
            header->magic == MH_CIGAM_64) {
        mach_header_sz = sizeof(struct mach_header_64);
    }

    /*
     *	Break infinite recursion
     */
    if (depth > 6) {
        return(LOAD_FAILURE);
    }

    task = (task_t)get_threadtask(thread);

    depth++;

    /*
     *	Check to see if right machine type.
     */
    if (((cpu_type_t)(header->cputype & ~CPU_ARCH_MASK) != cpu_type()) ||
            !grade_binary(header->cputype,
                          header->cpusubtype & ~CPU_SUBTYPE_MASK))
        return(LOAD_BADARCH);

    abi64 = ((header->cputype & CPU_ARCH_ABI64) == CPU_ARCH_ABI64);

    switch (header->filetype) {

    case MH_OBJECT:
    case MH_EXECUTE:
    case MH_PRELOAD:
        if (depth != 1) {
            return (LOAD_FAILURE);
        }
        break;

    case MH_FVMLIB:
    case MH_DYLIB:
        if (depth == 1) {
            return (LOAD_FAILURE);
        }
        break;

    case MH_DYLINKER:
        if (depth != 2) {
            return (LOAD_FAILURE);
        }
        break;

    default:
        return (LOAD_FAILURE);
    }

    /*
     *	Get the pager for the file.
     */
    pager = (void *) ubc_getpager(vp);

    /*
     *	Map portion that must be accessible directly into
     *	kernel's map.
     */
    if ((mach_header_sz + header->sizeofcmds) > macho_size)
        return(LOAD_BADMACHO);

    /*
     *	Round size of Mach-O commands up to page boundary.
     */
    size = round_page(mach_header_sz + header->sizeofcmds);
    if (size <= 0)
        return(LOAD_BADMACHO);

    /*
     * Map the load commands into kernel memory.
     */
    addr = 0;
    kl_size = size;
    kl_addr = kalloc(size);
    addr = (caddr_t)kl_addr;
    if (addr == NULL)
        return(LOAD_NOSPACE);

    error = vn_rdwr(UIO_READ, vp, addr, size, file_offset,
                    UIO_SYSSPACE32, 0, kauth_cred_get(), &resid, p);
    if (error) {
        if (kl_addr )
            kfree(kl_addr, kl_size);
        return(LOAD_IOERROR);
    }
    /* (void)ubc_map(vp, PROT_EXEC); */ /* NOT HERE */

    /*
     *	Scan through the commands, processing each one as necessary.
     */
    for (pass = 1; pass <= 2; pass++) {
        /*
         * Loop through each of the load_commands indicated by the
         * Mach-O header; if an absurd value is provided, we just
         * run off the end of the reserved section by incrementing
         * the offset too far, so we are implicitly fail-safe.
         */
        offset = mach_header_sz;
        ncmds = header->ncmds;
        while (ncmds--) {
            /*
             *	Get a pointer to the command.
             */
            lcp = (struct load_command *)(addr + offset);
            oldoffset = offset;
            offset += lcp->cmdsize;

            /*
             * Perform prevalidation of the struct load_command
             * before we attempt to use its contents.  Invalid
             * values are ones which result in an overflow, or
             * which can not possibly be valid commands, or which
             * straddle or exist past the reserved section at the
             * start of the image.
             */
            if (oldoffset > offset ||
                    lcp->cmdsize < sizeof(struct load_command) ||
                    offset > header->sizeofcmds + mach_header_sz) {
                ret = LOAD_BADMACHO;
                break;
            }

            /*
             * Act on struct load_command's for which kernel
             * intervention is required.
             */
            switch(lcp->cmd) {
            case LC_SEGMENT_64:
                if (pass != 1)
                    break;
                ret = load_segment_64(
                          (struct segment_command_64 *)lcp,
                          pager,
                          file_offset,
                          macho_size,
                          ubc_getsize(vp),
                          map,
                          result);
                break;
            case LC_SEGMENT:
                if (pass != 1)
                    break;
                ret = load_segment(
                          (struct segment_command *) lcp,
                          pager,
                          file_offset,
                          macho_size,
                          ubc_getsize(vp),
                          map,
                          result);
                break;
            case LC_THREAD:
                if (pass != 2)
                    break;
                ret = load_thread((struct thread_command *)lcp,
                                  thread,
                                  result);
                break;
            case LC_UNIXTHREAD:
                if (pass != 2)
                    break;
                ret = load_unixthread(
                          (struct thread_command *) lcp,
                          thread,
                          result);
                break;
            case LC_LOAD_DYLINKER:
                if (pass != 2)
                    break;
                if ((depth == 1) && (dlp == 0)) {
                    dlp = (struct dylinker_command *)lcp;
                    dlarchbits = (header->cputype & CPU_ARCH_MASK);
                } else {
                    ret = LOAD_FAILURE;
                }
                break;
            case LC_CODE_SIGNATURE:
                /* CODE SIGNING */
                if (pass != 2)
                    break;
                /* pager -> uip ->
                   load signatures & store in uip
                   set VM object "signed_pages"
                */
                ret = load_code_signature(
                          (struct linkedit_data_command *) lcp,
                          vp,
                          file_offset,
                          macho_size,
                          header->cputype,
                          (depth == 1) ? result : NULL);
                if (ret != LOAD_SUCCESS) {
                    printf("proc %d: load code signature error %d "
                           "for file \"%s\"\n",
                           p->p_pid, ret, vp->v_name);
                    ret = LOAD_SUCCESS; /* ignore error */
                } else {
                    got_code_signatures = TRUE;
                }
                break;
            default:
                /* Other commands are ignored by the kernel */
                ret = LOAD_SUCCESS;
                break;
            }
            if (ret != LOAD_SUCCESS)
                break;
        }
        if (ret != LOAD_SUCCESS)
            break;
    }
    if (ret == LOAD_SUCCESS) {
        if (! got_code_signatures) {
            struct cs_blob *blob;
            /* no embedded signatures: look for detached ones */
            blob = ubc_cs_blob_get(vp, -1, file_offset);
            if (blob != NULL) {
                /* get flags to be applied to the process */
                result->csflags |= blob->csb_flags;
            }
        }

        if (dlp != 0)
            ret = load_dylinker(dlp, dlarchbits, map, thread, depth, result, abi64);

        if(depth == 1) {
            if (result->thread_count == 0) {
                ret = LOAD_FAILURE;
            } else if ( abi64 ) {
#ifdef __ppc__
                /* Map in 64-bit commpage */
                /* LP64todo - make this clean */
                /*
                 * PPC51: ppc64 is limited to 51-bit addresses.
                 * Memory above that limit is handled specially
                 * at the pmap level.
                 */
                pmap_map_sharedpage(current_task(), get_map_pmap(map));
#endif /* __ppc__ */
            }
        }
    }

    if (kl_addr )
        kfree(kl_addr, kl_size);

    if (ret == LOAD_SUCCESS)
        (void)ubc_map(vp, PROT_READ | PROT_EXEC);

    return(ret);
}
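
The prevalidation inside the while (ncmds--) loop is the reusable part: before trusting lcp->cmdsize, the code checks that the running offset did not wrap, that the command is at least sizeof(struct load_command), and that it stays within sizeofcmds. A user-space sketch of the same walk over a 64-bit Mach-O image already in memory (native-endian assumed; buf and len come from the caller), with one extra bounds check before the command header is dereferenced, since user space has no page-rounded slack buffer:

#include <mach-o/loader.h>
#include <stddef.h>
#include <stdint.h>

/* walk load commands with the same overflow checks as parse_machfile() */
static int walk_load_commands(const uint8_t *buf, size_t len)
{
	const struct mach_header_64 *mh = (const struct mach_header_64 *)buf;
	size_t offset = sizeof(*mh);

	if (len < sizeof(*mh) || mh->magic != MH_MAGIC_64 ||
	    (uint64_t)mh->sizeofcmds + sizeof(*mh) > len)
		return -1;				/* LOAD_BADMACHO analogue */

	for (uint32_t i = 0; i < mh->ncmds; i++) {
		const struct load_command *lcp;
		size_t oldoffset = offset;

		/* the command header itself must lie inside the command area */
		if (offset + sizeof(struct load_command) >
		    sizeof(*mh) + mh->sizeofcmds)
			return -1;
		lcp = (const struct load_command *)(buf + offset);

		offset += lcp->cmdsize;
		/* reject wrap, runt commands, and commands past sizeofcmds */
		if (oldoffset > offset ||
		    lcp->cmdsize < sizeof(struct load_command) ||
		    offset > sizeof(*mh) + mh->sizeofcmds)
			return -1;
		/* ... dispatch on lcp->cmd here ... */
	}
	return 0;
}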
Code Example #6
static
load_return_t
load_dylinker(
    struct dylinker_command	*lcp,
    integer_t		archbits,
    vm_map_t		map,
    thread_t	thread,
    int			depth,
    load_result_t		*result,
    boolean_t		is_64bit
)
{
    char			*name;
    char			*p;
    struct vnode		*vp = NULLVP;	/* set by get_macho_vnode() */
    struct mach_header	header;
    off_t			file_offset = 0; /* set by get_macho_vnode() */
    off_t			macho_size = 0;	/* set by get_macho_vnode() */
    vm_map_t		copy_map;
    load_result_t		myresult;
    kern_return_t		ret;
    vm_map_copy_t	tmp;
    mach_vm_offset_t	dyl_start, map_addr;
    mach_vm_size_t		dyl_length;

    name = (char *)lcp + lcp->name.offset;
    /*
     *	Check for a proper null terminated string.
     */
    p = name;
    do {
        if (p >= (char *)lcp + lcp->cmdsize)
            return(LOAD_BADMACHO);
    } while (*p++);

    ret = get_macho_vnode(name, archbits, &header, &file_offset, &macho_size, &vp);
    if (ret)
        return (ret);

    myresult = load_result_null;

    /*
     *	First try to map dyld in directly.  This should work most of
     *	the time since there shouldn't normally be something already
     *	mapped to its address.
     */

    ret = parse_machfile(vp, map, thread, &header, file_offset, macho_size,
                         depth, &myresult);

    /*
     *	If it turned out something was in the way, then we'll take
     *	this longer path to map dyld into a temporary map and copy
     *	it into the destination map at a different address.
     */

    if (ret == LOAD_NOSPACE) {

        /*
         *	Load the Mach-O.
         *	Use a temporary map to do the work.
         */
        copy_map = vm_map_create(pmap_create(vm_map_round_page(macho_size),
                                             is_64bit),
                                 get_map_min(map), get_map_max(map), TRUE);
        if (VM_MAP_NULL == copy_map) {
            ret = LOAD_RESOURCE;
            goto out;
        }

        myresult = load_result_null;

        ret = parse_machfile(vp, copy_map, thread, &header,
                             file_offset, macho_size,
                             depth, &myresult);

        if (ret) {
            vm_map_deallocate(copy_map);
            goto out;
        }

        if (get_map_nentries(copy_map) > 0) {

            dyl_start = mach_get_vm_start(copy_map);
            dyl_length = mach_get_vm_end(copy_map) - dyl_start;

            map_addr = dyl_start;
            ret = mach_vm_allocate(map, &map_addr, dyl_length, VM_FLAGS_ANYWHERE);

            if (ret != KERN_SUCCESS) {
                vm_map_deallocate(copy_map);
                ret = LOAD_NOSPACE;
                goto out;

            }

            ret = vm_map_copyin(copy_map,
                                (vm_map_address_t)dyl_start,
                                (vm_map_size_t)dyl_length,
                                TRUE, &tmp);
            if (ret != KERN_SUCCESS) {
                (void) vm_map_remove(map,
                                     vm_map_trunc_page(map_addr),
                                     vm_map_round_page(map_addr + dyl_length),
                                     VM_MAP_NO_FLAGS);
                vm_map_deallocate(copy_map);
                goto out;
            }

            ret = vm_map_copy_overwrite(map,
                                        (vm_map_address_t)map_addr,
                                        tmp, FALSE);
            if (ret != KERN_SUCCESS) {
                vm_map_copy_discard(tmp);
                (void) vm_map_remove(map,
                                     vm_map_trunc_page(map_addr),
                                     vm_map_round_page(map_addr + dyl_length),
                                     VM_MAP_NO_FLAGS);
                vm_map_deallocate(copy_map);
                goto out;
            }

            if (map_addr != dyl_start)
                myresult.entry_point += (map_addr - dyl_start);
        } else {
            ret = LOAD_FAILURE;
        }

        vm_map_deallocate(copy_map);
    }

    if (ret == LOAD_SUCCESS) {
        result->dynlinker = TRUE;
        result->entry_point = myresult.entry_point;
        (void)ubc_map(vp, PROT_READ | PROT_EXEC);
    }
out:
    vnode_put(vp);
    return (ret);

}
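
The name-validation loop at the top of load_dylinker() is a small pattern worth keeping: scan the lc_str payload and fail unless a NUL terminator appears before cmdsize runs out, so a malformed command can never send string handling past the end of the load command. A stand-alone sketch of the same check:

#include <stdbool.h>
#include <stddef.h>

/* true iff buf[0..limit) contains a NUL, i.e. the embedded string
 * cannot run past the end of its load command */
static bool str_fits(const char *buf, size_t limit)
{
	for (size_t i = 0; i < limit; i++)
		if (buf[i] == '\0')
			return true;
	return false;	/* load_dylinker() would return LOAD_BADMACHO */
}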