Example #1
0
int
munmap(__unused proc_t p, struct munmap_args *uap, __unused register_t *retval)
{
	mach_vm_offset_t	user_addr;
	mach_vm_size_t	user_size;
	kern_return_t	result;

	user_addr = (mach_vm_offset_t) uap->addr;
	user_size = (mach_vm_size_t) uap->len;

	AUDIT_ARG(addr, user_addr);
	AUDIT_ARG(len, user_size);

	if (user_addr & PAGE_MASK_64) {
		/* UNIX SPEC: user address is not page-aligned, return EINVAL */
		return EINVAL;
	}

	if (user_addr + user_size < user_addr)
		return(EINVAL);

	if (user_size == 0) {
		/* UNIX SPEC: size is 0, return EINVAL */
		return EINVAL;
	}

	result = mach_vm_deallocate(current_map(), user_addr, user_size);
	if (result != KERN_SUCCESS) {
		return(EINVAL);
	}
	return(0);
}
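A minimal user-space sketch (assuming a POSIX environment and <sys/mman.h>) that exercises the EINVAL paths above: an unaligned address and a zero length are both rejected before the kernel ever calls mach_vm_deallocate().

#include <sys/mman.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	size_t pgsz = (size_t)sysconf(_SC_PAGESIZE);
	void *base = mmap(NULL, pgsz, PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_PRIVATE, -1, 0);
	if (base == MAP_FAILED)
		return 1;

	/* unaligned address: fails the PAGE_MASK_64 check */
	if (munmap((char *)base + 1, pgsz) == -1)
		perror("munmap unaligned");	/* expect EINVAL */

	/* zero length: fails the user_size == 0 check */
	if (munmap(base, 0) == -1)
		perror("munmap zero-length");	/* expect EINVAL */

	/* well-formed unmap succeeds */
	return munmap(base, pgsz);
}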
Example #2
0
/*
 * pid_for_task
 *
 * Find the BSD process ID for the Mach task associated with the given Mach port 
 * name
 *
 * Parameters:	args		User argument descriptor (see below)
 *
 * Indirect parameters:	args->t		Mach port name
 * 			args->pid	Process ID (returned value; see below)
 *
 * Returns:	KERN_SUCCESS	Success
 * 		KERN_FAILURE	Not success
 *
 * Implicit returns: args->pid		Process ID
 *
 */
kern_return_t
pid_for_task(
	struct pid_for_task_args *args)
{
	mach_port_name_t	t = args->t;
	user_addr_t		pid_addr = args->pid;
	proc_t p;
	task_t		t1;
	int	pid = -1;
	kern_return_t	err = KERN_SUCCESS;

	AUDIT_MACH_SYSCALL_ENTER(AUE_PIDFORTASK);
	AUDIT_ARG(mach_port1, t);

	t1 = port_name_to_task(t);

	if (t1 == TASK_NULL) {
		err = KERN_FAILURE;
		goto pftout;
	} else {
		p = get_bsdtask_info(t1);
		if (p) {
			pid  = proc_pid(p);
			err = KERN_SUCCESS;
		} else {
			err = KERN_FAILURE;
		}
	}
	task_deallocate(t1);
pftout:
	AUDIT_ARG(pid, pid);
	(void) copyout((char *) &pid, pid_addr, sizeof(int));
	AUDIT_MACH_SYSCALL_EXIT(err);
	return(err);
}
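pid_for_task() is also reachable from user space as a Mach trap; the sketch below assumes the usual prototype from <mach/mach_traps.h>. Asking about our own task port should round-trip to getpid().

#include <mach/mach.h>
#include <mach/mach_traps.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	int pid = -1;

	/* An invalid port name resolves to TASK_NULL in
	 * port_name_to_task() above and yields KERN_FAILURE. */
	kern_return_t kr = pid_for_task(mach_task_self(), &pid);
	if (kr != KERN_SUCCESS)
		return 1;
	printf("pid_for_task: %d, getpid: %d\n", pid, (int)getpid());
	return 0;
}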
Example #3
0
int
minherit(__unused proc_t p, struct minherit_args *uap, __unused register_t *retval)
{
	mach_vm_offset_t addr;
	mach_vm_size_t size;
	register vm_inherit_t inherit;
	vm_map_t	user_map;
	kern_return_t	result;

	AUDIT_ARG(addr, uap->addr);
	AUDIT_ARG(len, uap->len);
	AUDIT_ARG(value, uap->inherit);

	addr = (mach_vm_offset_t)uap->addr;
	size = (mach_vm_size_t)uap->len;
	inherit = uap->inherit;

	user_map = current_map();
	result = mach_vm_inherit(user_map, addr, size,
				inherit);
	switch (result) {
	case KERN_SUCCESS:
		return (0);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	}
	return (EINVAL);
}
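A hedged user-space sketch of minherit(2): since uap->inherit is passed through to mach_vm_inherit() unchanged, the accepted values are assumed to be the VM_INHERIT_* constants from <mach/vm_inherit.h>.

#include <sys/mman.h>
#include <mach/vm_inherit.h>
#include <unistd.h>

int
main(void)
{
	size_t pgsz = (size_t)sysconf(_SC_PAGESIZE);
	void *p = mmap(NULL, pgsz, PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_PRIVATE, -1, 0);
	if (p == MAP_FAILED)
		return 1;

	/* Children forked after this call see the page unmapped.
	 * Per the switch above: KERN_SUCCESS -> 0,
	 * KERN_PROTECTION_FAILURE -> EACCES, anything else -> EINVAL. */
	if (minherit(p, pgsz, VM_INHERIT_NONE) == -1)
		return 1;
	return munmap(p, pgsz);
}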
Example #4
0
int
shmat(struct proc *p, struct shmat_args *uap, user_addr_t *retval)
{
	int error, i, flags;
	struct shmid_kernel	*shmseg;
	struct shmmap_state	*shmmap_s = NULL;
	struct shm_handle	*shm_handle;
	mach_vm_address_t	attach_va;	/* attach address in/out */
	mach_vm_size_t		map_size;	/* size of map entry */
	mach_vm_size_t		mapped_size;
	vm_prot_t		prot;
	size_t			size;
	kern_return_t		rv;
	int			shmat_ret;
	int			vm_flags;

	shmat_ret = 0;

	AUDIT_ARG(svipc_id, uap->shmid);
	AUDIT_ARG(svipc_addr, uap->shmaddr);

	SYSV_SHM_SUBSYS_LOCK();

	if ((shmat_ret = shminit())) {
		goto shmat_out;
	}

	shmmap_s = (struct shmmap_state *)p->vm_shm;
	if (shmmap_s == NULL) {
		/* lazily allocate the shm map */

		int nsegs = shminfo.shmseg;
		if (nsegs <= 0) {
			shmat_ret = EMFILE;
			goto shmat_out;
		}

		/* +1 for the sentinel */
		if (os_add_and_mul_overflow(nsegs, 1, sizeof(struct shmmap_state), &size)) {
			shmat_ret = ENOMEM;
			goto shmat_out;
		}

		MALLOC(shmmap_s, struct shmmap_state *, size, M_SHM, M_WAITOK | M_NULL);
		if (shmmap_s == NULL) {
			shmat_ret = ENOMEM;
			goto shmat_out;
		}

		/* initialize the entries */
		for (i = 0; i < nsegs; i++) {
			shmmap_s[i].shmid = SHMID_UNALLOCATED;
		}
		shmmap_s[i].shmid = SHMID_SENTINEL;

		p->vm_shm = (caddr_t)shmmap_s;
	}
Example #5
0
void
lookup_compound_vnop_post_hook(int error, vnode_t dvp, vnode_t vp, struct nameidata *ndp, int did_create)
{
	if (error == 0 && vp == NULLVP) {
		panic("NULL vp with error == 0.\n");
	}

	/* 
	 * We don't want to do any of this if we didn't use the compound vnop
	 * to perform the lookup... i.e. if we're allowing and using the legacy pattern,
	 * where we did a full lookup.
	 */
	if ((ndp->ni_flag & NAMEI_COMPOUND_OP_MASK) == 0) {
		return;
	}

	/* 
	 * If we're going to continue the lookup, we'll handle
	 * all lookup-related updates at that time.
	 */
	if (error == EKEEPLOOKING) {
		return;
	}

	/*
	 * Only audit or update cache for *found* vnodes.  For creation
	 * neither would happen in the non-compound-vnop case.
	 */
	if ((vp != NULLVP) && !did_create) {
		/* 
		 * If MAKEENTRY isn't set, and we've done a successful compound VNOP, 
		 * then we certainly don't want to update cache or identity.
		 */
		if ((error != 0) || (ndp->ni_cnd.cn_flags & MAKEENTRY)) {
			lookup_consider_update_cache(dvp, vp, &ndp->ni_cnd, ndp->ni_ncgeneration);
		}
		if (ndp->ni_cnd.cn_flags & AUDITVNPATH1)
			AUDIT_ARG(vnpath, vp, ARG_VNODE1);
		else if (ndp->ni_cnd.cn_flags & AUDITVNPATH2)
			AUDIT_ARG(vnpath, vp, ARG_VNODE2);
	}

	/* 
	 * If you created (whether you opened or not), cut a lookup tracepoint 
	 * for the parent dir (as would happen without a compound vnop).  Note: we may need
	 * a vnode despite failure in this case!
	 *
	 * If you did not create:
	 * 	Found child (succeeded or not): cut a tracepoint for the child.  
	 * 	Did not find child: cut a tracepoint with the parent.
	 */
	if (kdebug_enable) {
	        kdebug_lookup(vp ? vp : dvp, &ndp->ni_cnd); 
	}
}
Example #6
0
/*
 * Comes in iocount on ni_vp.  May overwrite ni_dvp, but doesn't interpret incoming value.
 */
int 
lookup_handle_emptyname(struct nameidata *ndp, struct componentname *cnp, int wantparent)
{
	vnode_t dp;
	int error = 0;

	dp = ndp->ni_vp;
	cnp->cn_namelen = 0;
	/*
	 * A degenerate name (e.g. "/" or "") is a way of
	 * talking about a directory, e.g. "/." or ".".
	 */
	if (dp->v_type != VDIR) {
		error = ENOTDIR;
		goto out;
	}
	if (cnp->cn_nameiop != LOOKUP) {
		error = EISDIR;
		goto out;
	}
	if (wantparent) {
	        /*
		 * note that we already hold a reference
		 * on dp, but for some reason can't
		 * get another one... in this case we
		 * need to do vnode_put on dp in 'out'
		 */
	        if ( (vnode_get(dp)) ) {
		        error = ENOENT;
			goto out;
		}
		ndp->ni_dvp = dp;
	}
	cnp->cn_flags &= ~ISDOTDOT;
	cnp->cn_flags |= ISLASTCN;
	ndp->ni_next = cnp->cn_nameptr;
	ndp->ni_vp = dp;

	if (cnp->cn_flags & AUDITVNPATH1)
		AUDIT_ARG(vnpath, dp, ARG_VNODE1);
	else if (cnp->cn_flags & AUDITVNPATH2)
		AUDIT_ARG(vnpath, dp, ARG_VNODE2);
	if (cnp->cn_flags & SAVESTART)
		panic("lookup: SAVESTART");

	return 0;
out:
	return error;
}
Example #7
0
int
reboot(struct proc *p, register struct reboot_args *uap, __unused int32_t *retval)
{
	char command[64];
	int error=0;
	size_t dummy=0;
#if CONFIG_MACF
	kauth_cred_t my_cred;
#endif

	AUDIT_ARG(cmd, uap->opt);

	command[0] = '\0';

	if ((error = suser(kauth_cred_get(), &p->p_acflag)))
		return(error);	
	
	if (uap->opt & RB_COMMAND)
		error = copyinstr(uap->command,
					(void *)command, sizeof(command), (size_t *)&dummy);
#if CONFIG_MACF
	if (error)
		return (error);
	my_cred = kauth_cred_proc_ref(p);
	error = mac_system_check_reboot(my_cred, uap->opt);
	kauth_cred_unref(&my_cred);
#endif
	if (!error) {
		OSBitOrAtomic(P_REBOOT, &p->p_flag);  /* No more signals for this proc */
		error = boot(RB_BOOT, uap->opt, command);
	}
	return(error);
}
Example #8
0
/* ARGSUSED */
int
setaudit_addr(proc_t p, struct setaudit_addr_args *uap,
    __unused int32_t *retval)
{
	struct auditinfo_addr aia;
	kauth_cred_t scred;
	int error;

	bzero(&aia, sizeof(auditinfo_addr_t));
	error = copyin(uap->auditinfo_addr, &aia, 
	    min(sizeof(aia), uap->length));
	if (error)
		return (error);
	AUDIT_ARG(auditinfo_addr, &aia);
	if (aia.ai_termid.at_type != AU_IPv6 &&
	    aia.ai_termid.at_type != AU_IPv4)
		return (EINVAL);
	if (aia.ai_asid != AU_ASSIGN_ASID && 
	    (uint32_t)aia.ai_asid > ASSIGNED_ASID_MAX)
		return (EINVAL);

#if CONFIG_MACF
	error = mac_proc_check_setaudit(p, &aia);
	if (error)
		return (error);
#endif

	scred = kauth_cred_proc_ref(p);
	error = suser(scred, &p->p_acflag);
	if (error) {
		kauth_cred_unref(&scred);
		return (error);
	}

	WARN_IF_AINFO_ADDR_CHANGED(uap->length, sizeof(auditinfo_addr_t),
	    "setaudit_addr(2)", "auditinfo_addr_t");
	WARN_IF_BAD_ASID(aia.ai_asid, "setaudit_addr(2)");
	kauth_cred_unref(&scred);

	AUDIT_CHECK_IF_KEVENTS_MASK(aia.ai_mask);
	if (aia.ai_asid == AU_DEFAUDITSID)
		aia.ai_asid = AU_ASSIGN_ASID;

	error = audit_session_setaia(p, &aia);
	if (error)
		return (error);

	/*
	 * If asked to assign an ASID then let the user know what the ASID is
	 * by copying the auditinfo_addr struct back out.
	 */
	if (aia.ai_asid == AU_ASSIGN_ASID)
		error = getaudit_addr_internal(p, uap->auditinfo_addr,
		    uap->length);

	return (error);
}
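A sketch of the corresponding user-space call, with the setaudit_addr(2) signature assumed from <bsm/audit.h>; it must run as superuser to pass the suser() check above.

#include <bsm/audit.h>
#include <stdio.h>
#include <string.h>

int
set_session_audit(void)
{
	auditinfo_addr_t aia;

	memset(&aia, 0, sizeof(aia));
	aia.ai_termid.at_type = AU_IPv4;	/* must be AU_IPv4 or AU_IPv6 */
	aia.ai_asid = AU_ASSIGN_ASID;		/* let the kernel pick an ASID */

	if (setaudit_addr(&aia, sizeof(aia)) == -1) {
		perror("setaudit_addr");
		return -1;
	}
	/* With AU_ASSIGN_ASID, the kernel copies the struct back out with
	 * the assigned ASID filled in (see getaudit_addr_internal above). */
	printf("asid: %d\n", (int)aia.ai_asid);
	return 0;
}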
Example #9
0
/* USV: No! need to obsolete map_fd()! mmap() already supports 64 bits */
kern_return_t
map_fd(struct map_fd_args *args)
{
	int		fd = args->fd;
	vm_offset_t	offset = args->offset;
	vm_offset_t	*va = args->va;
	boolean_t	findspace = args->findspace;
	vm_size_t	size = args->size;
	kern_return_t ret;

	AUDIT_MACH_SYSCALL_ENTER(AUE_MAPFD);
	AUDIT_ARG(addr, CAST_DOWN(user_addr_t, va));
	AUDIT_ARG(fd, fd);

	ret = map_fd_funneled( fd, (vm_object_offset_t)offset, va, findspace, size);

	AUDIT_MACH_SYSCALL_EXIT(ret);
	return ret;
}
Example #10
0
int
munlock(__unused proc_t p, struct munlock_args *uap, __unused register_t *retval)
{
	mach_vm_offset_t addr;
	mach_vm_size_t size;
	vm_map_t user_map;
	kern_return_t	result;

	AUDIT_ARG(addr, uap->addr);
	AUDIT_ARG(len, uap->len);

	addr = (mach_vm_offset_t) uap->addr;
	size = (mach_vm_size_t)uap->len;
	user_map = current_map();

	/* JMM - need to remove all wirings by spec - this just removes one */
	result = mach_vm_wire(host_priv_self(), user_map, addr, size, VM_PROT_NONE);
	return (result == KERN_SUCCESS ? 0 : ENOMEM);
}
Example #11
0
int
shmdt(struct proc *p, struct shmdt_args *uap, int32_t *retval)
{
#if CONFIG_MACF
	struct shmid_kernel *shmsegptr;
#endif
	struct shmmap_state *shmmap_s;
	int i;
	int shmdtret = 0;

	AUDIT_ARG(svipc_addr, uap->shmaddr);

	SYSV_SHM_SUBSYS_LOCK();

	if ((shmdtret = shminit())) {
		goto shmdt_out;
	}

	shmmap_s = (struct shmmap_state *)p->vm_shm;
	if (shmmap_s == NULL) {
		shmdtret = EINVAL;
		goto shmdt_out;
	}

	for (; shmmap_s->shmid != SHMID_SENTINEL; shmmap_s++) {
		if (SHMID_IS_VALID(shmmap_s->shmid) &&
		    shmmap_s->va == (mach_vm_offset_t)uap->shmaddr) {
			break;
		}
	}

	if (!SHMID_IS_VALID(shmmap_s->shmid)) {
		shmdtret = EINVAL;
		goto shmdt_out;
	}

#if CONFIG_MACF
	/*
	 * XXX: It might be useful to move this into the shm_delete_mapping
	 * function
	 */
	shmsegptr = &shmsegs[IPCID_TO_IX(shmmap_s->shmid)];
	shmdtret = mac_sysvshm_check_shmdt(kauth_cred_get(), shmsegptr);
	if (shmdtret)
		goto shmdt_out;
#endif
	i = shm_delete_mapping(p, shmmap_s, 1);

	if (i == 0)
		*retval = 0;
	shmdtret = i;
shmdt_out:
	SYSV_SHM_SUBSYS_UNLOCK();
	return shmdtret;
}
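A user-space walk through the same lifecycle: shmat() populates the per-process shmmap_state array that shmdt() later searches for a matching attach address.

#include <sys/ipc.h>
#include <sys/shm.h>
#include <stdio.h>

int
main(void)
{
	int shmid = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
	if (shmid == -1)
		return 1;

	void *va = shmat(shmid, NULL, 0);
	if (va == (void *)-1)
		return 1;

	/* shmdt() scans for va up to SHMID_SENTINEL, as above;
	 * an address that was never attached yields EINVAL. */
	if (shmdt(va) == -1)
		perror("shmdt");

	/* mark the segment for removal once all attaches are gone */
	return shmctl(shmid, IPC_RMID, NULL);
}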
Example #12
0
int
reboot(struct proc *p, struct reboot_args *uap, __unused int32_t *retval)
{
	char message[128];
	int error=0;
	size_t dummy=0;
#if CONFIG_MACF
	kauth_cred_t my_cred;
#endif

	AUDIT_ARG(cmd, uap->opt);

	message[0] = '\0';

	if ((error = suser(kauth_cred_get(), &p->p_acflag))) {
#if (DEVELOPMENT || DEBUG)
		/* allow non-root user to call panic on dev/debug kernels */
		if (!(uap->opt & RB_PANIC))
			return error;
#else
		return error;
#endif
	}

	if (uap->opt & RB_COMMAND)
		return ENOSYS;

	if (uap->opt & RB_PANIC) {
		error = copyinstr(uap->command, (void *)message, sizeof(message), (size_t *)&dummy);
	}

#if CONFIG_MACF
#if (DEVELOPMENT || DEBUG)
	if (uap->opt & RB_PANIC) {
		/* on dev/debug kernels: allow anyone to call panic */
		goto skip_cred_check;
	}
#endif
	if (error)
		return (error);
	my_cred = kauth_cred_proc_ref(p);
	error = mac_system_check_reboot(my_cred, uap->opt);
	kauth_cred_unref(&my_cred);
#if (DEVELOPMENT || DEBUG)
skip_cred_check:
#endif
#endif
	if (!error) {
		OSBitOrAtomic(P_REBOOT, &p->p_flag);  /* No more signals for this proc */
		error = reboot_kernel(uap->opt, message);
	}
	return(error);
}
Example #13
0
int
mlock(__unused proc_t p, struct mlock_args *uap, __unused int32_t *retval)
{
	vm_map_t user_map;
	vm_map_offset_t addr;
	vm_map_size_t size, pageoff;
	kern_return_t	result;

	AUDIT_ARG(addr, uap->addr);
	AUDIT_ARG(len, uap->len);

	addr = (vm_map_offset_t) uap->addr;
	size = (vm_map_size_t)uap->len;

	/* disable wrap around */
	if (addr + size < addr)
		return (EINVAL);

	if (size == 0)
		return (0);

	user_map = current_map();
	pageoff = (addr & vm_map_page_mask(user_map));
	addr -= pageoff;
	size = vm_map_round_page(size+pageoff, vm_map_page_mask(user_map));

	/* have to call vm_map_wire directly to pass "I don't know" protections */
	result = vm_map_wire(user_map, addr, addr+size, VM_PROT_NONE | VM_PROT_MEMORY_TAG_MAKE(VM_KERN_MEMORY_MLOCK), TRUE);

	if (result == KERN_RESOURCE_SHORTAGE)
		return EAGAIN;
	else if (result == KERN_PROTECTION_FAILURE)
		return EACCES;
	else if (result != KERN_SUCCESS)
		return ENOMEM;

	return 0;	/* KERN_SUCCESS */
}
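A user-space sketch pairing mlock() with the munlock() example above; the kernel rounds the range to page boundaries itself, so an unaligned start inside a mapping is accepted.

#include <sys/mman.h>
#include <errno.h>
#include <unistd.h>

int
main(void)
{
	size_t pgsz = (size_t)sysconf(_SC_PAGESIZE);
	void *p = mmap(NULL, pgsz, PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_PRIVATE, -1, 0);
	if (p == MAP_FAILED)
		return 1;

	if (mlock((char *)p + 16, 100) == -1) {
		/* EAGAIN on KERN_RESOURCE_SHORTAGE (e.g. RLIMIT_MEMLOCK),
		 * EACCES on KERN_PROTECTION_FAILURE, else ENOMEM. */
		return errno == EAGAIN ? 2 : 1;
	}

	if (munlock(p, pgsz) == -1)
		return 1;
	return munmap(p, pgsz);
}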
Example #14
0
int
mac_execve_enter(user_addr_t mac_p, struct image_params *imgp)
{
	struct user_mac mac;
	struct label *execlabel;
	char *buffer;
	int error;
	size_t ulen;

	if (mac_p == USER_ADDR_NULL)
		return (0);

	if (IS_64BIT_PROCESS(current_proc())) {
		struct user64_mac mac64;
		error = copyin(mac_p, &mac64, sizeof(mac64));
		mac.m_buflen = mac64.m_buflen;
		mac.m_string = mac64.m_string;
	} else {
		struct user32_mac mac32;
		error = copyin(mac_p, &mac32, sizeof(mac32));
		mac.m_buflen = mac32.m_buflen;
		mac.m_string = mac32.m_string;
	}
	if (error)
		return (error);

	error = mac_check_structmac_consistent(&mac);
	if (error)
		return (error);

	execlabel = mac_cred_label_alloc();
	MALLOC(buffer, char *, mac.m_buflen, M_MACTEMP, M_WAITOK);
	error = copyinstr(CAST_USER_ADDR_T(mac.m_string), buffer, mac.m_buflen, &ulen);
	if (error)
		goto out;
	AUDIT_ARG(mac_string, buffer);

	error = mac_cred_label_internalize(execlabel, buffer);
out:
	if (error) {
		mac_cred_label_free(execlabel);
		execlabel = NULL;
	}
	imgp->ip_execlabelp = execlabel;
	FREE(buffer, M_MACTEMP);
	return (error);
}
Example #15
0
int
reboot(struct proc *p, struct reboot_args *uap, __unused int32_t *retval)
{
	char message[128];
	int error=0;
	size_t dummy=0;
#if CONFIG_MACF
	kauth_cred_t my_cred;
#endif

	AUDIT_ARG(cmd, uap->opt);

	message[0] = '\0';

	if ((error = suser(kauth_cred_get(), &p->p_acflag)))
		return(error);

	if (uap->opt & RB_COMMAND)
		return ENOSYS;

	if (uap->opt & RB_PANIC) {
#if !(DEVELOPMENT || DEBUG)
		if (p != initproc) {
			return EPERM;
		}
#endif
		error = copyinstr(uap->command, (void *)message, sizeof(message), (size_t *)&dummy);
	}

#if CONFIG_MACF
	if (error)
		return (error);
	my_cred = kauth_cred_proc_ref(p);
	error = mac_system_check_reboot(my_cred, uap->opt);
	kauth_cred_unref(&my_cred);
#endif
	if (!error) {
		OSBitOrAtomic(P_REBOOT, &p->p_flag);  /* No more signals for this proc */
		error = reboot_kernel(uap->opt, message);
	}
	return(error);
}
Example #16
0
/* ARGSUSED */
int
setauid(proc_t p, struct setauid_args *uap, __unused int32_t *retval)
{
	int error;
	au_id_t	id;
	kauth_cred_t scred;
	struct auditinfo_addr aia;

	error = copyin(uap->auid, &id, sizeof(id));
	if (error)
		return (error);
	AUDIT_ARG(auid, id);

#if CONFIG_MACF
	error = mac_proc_check_setauid(p, id);
	if (error)
		return (error);
#endif

	scred = kauth_cred_proc_ref(p);
	error = suser(scred, &p->p_acflag);
	if (error) {
		kauth_cred_unref(&scred);
		return (error);
	}

	bcopy(scred->cr_audit.as_aia_p, &aia, sizeof(aia));
	if (aia.ai_asid == AU_DEFAUDITSID) {
		aia.ai_asid = AU_ASSIGN_ASID;
	}
	bcopy(&scred->cr_audit.as_mask, &aia.ai_mask, sizeof(au_mask_t));
	kauth_cred_unref(&scred);
	aia.ai_auid = id;
	error = audit_session_setaia(p, &aia);

	return (error);
}
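A minimal caller-side sketch: the copyin(uap->auid, ...) above implies setauid(2) takes a pointer to the new audit UID, and the suser() check means this only succeeds for privileged processes.

#include <bsm/audit.h>
#include <stdio.h>

int
set_audit_uid(au_id_t id)
{
	if (setauid(&id) == -1) {
		perror("setauid");
		return -1;
	}
	return 0;
}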
Example #17
0
/*
 * fork1
 *
 * Description:	common code used by all new process creation other than the
 *		bootstrap of the initial process on the system
 *
 * Parameters:	parent_proc		parent process of the process
 *					being created
 *		child_threadp		pointer to location to receive the
 *					Mach thread_t of the child process
 *					created
 *		kind			kind of creation being requested
 *
 * Notes:	Permissible values for 'kind':
 *
 *		PROC_CREATE_FORK	Create a complete process which will
 *					return actively running in both the
 *					parent and the child; the child copies
 *					the parent address space.
 *		PROC_CREATE_SPAWN	Create a complete process which will
 *					return actively running in the parent
 *					only after returning actively running
 *					in the child; the child address space
 *					is newly created by an image activator,
 *					after which the child is run.
 *		PROC_CREATE_VFORK	Creates a partial process which will
 *					borrow the parent task, thread, and
 *					uthread to return running in the child;
 *					the child address space and other parts
 *					are lazily created at execve() time, or
 *					the child is terminated, and the parent
 *					does not actively run until that
 *					happens.
 *
 *		At first it may seem strange that we return the child thread
 *		address rather than process structure, since the process is
 *		the only part guaranteed to be "new"; however, since we do
 *		not actually adjust other references between Mach and BSD (see
 *		the block diagram above the implementation of vfork()), this
 *		is the only method which guarantees us the ability to get
 *		back to the other information.
 */
int
fork1(proc_t parent_proc, thread_t *child_threadp, int kind)
{
	thread_t parent_thread = (thread_t)current_thread();
	uthread_t parent_uthread = (uthread_t)get_bsdthread_info(parent_thread);
	proc_t child_proc = NULL;	/* set in switch, but compiler... */
	thread_t child_thread = NULL;
	uid_t uid;
	int count;
	int err = 0;
	int spawn = 0;

	/*
	 * Although process entries are dynamically created, we still keep
	 * a global limit on the maximum number we will create.  Don't allow
	 * a nonprivileged user to use the last process; don't let root
	 * exceed the limit. The variable nprocs is the current number of
	 * processes, maxproc is the limit.
	 */
	uid = kauth_cred_get()->cr_ruid;
	proc_list_lock();
	if ((nprocs >= maxproc - 1 && uid != 0) || nprocs >= maxproc) {
		proc_list_unlock();
		tablefull("proc");
		return (EAGAIN);
	}
	proc_list_unlock();

	/*
	 * Increment the count of procs running with this uid. Don't allow
	 * a nonprivileged user to exceed their current limit, which is
	 * always less than what an rlim_t can hold.
	 * (locking protection is provided by list lock held in chgproccnt)
	 */
	count = chgproccnt(uid, 1);
	if (uid != 0 &&
	    (rlim_t)count > parent_proc->p_rlimit[RLIMIT_NPROC].rlim_cur) {
	    	err = EAGAIN;
		goto bad;
	}

#if CONFIG_MACF
	/*
	 * Determine if MAC policies applied to the process will allow
	 * it to fork.  This is an advisory-only check.
	 */
	err = mac_proc_check_fork(parent_proc);
	if (err  != 0) {
		goto bad;
	}
#endif

	switch(kind) {
	case PROC_CREATE_VFORK:
		/*
		 * Prevent a vfork while we are in vfork(); we should
		 * likely prevent a fork here as well, and this
		 * check should then be outside the switch statement,
		 * since the proc struct contents will copy from the
		 * child and the task/thread/uthread from the parent in
		 * that case.  We do not support vfork() in vfork()
		 * because we don't have to; the same non-requirement
		 * is true of both fork() and posix_spawn() and any
		 * call other than execve() and _exit(), but we've
		 * been historically lenient, so we continue to be so
		 * (for now).
		 *
		 * <rdar://6640521> Probably a source of random panics
		 */
		if (parent_uthread->uu_flag & UT_VFORK) {
			printf("fork1 called within vfork by %s\n", parent_proc->p_comm);
			err = EINVAL;
			goto bad;
		}

		/*
		 * Flag us in progress; if we chose to support vfork() in
		 * vfork(), we would chain our parent at this point (in
		 * effect, a stack push).  We don't, since we actually want
		 * to disallow everything not specified in the standard
		 */
		proc_vfork_begin(parent_proc);

		/* The newly created process comes with signal lock held */
		if ((child_proc = forkproc(parent_proc)) == NULL) {
			/* Failed to allocate new process */
			proc_vfork_end(parent_proc);
			err = ENOMEM;
			goto bad;
		}

// XXX BEGIN: wants to move to be common code (and safe)
#if CONFIG_MACF
		/*
		 * allow policies to associate the credential/label that
		 * we referenced from the parent ... with the child
		 * JMM - this really isn't safe, as we can drop that
		 *       association without informing the policy in other
		 *       situations (keep long enough to get policies changed)
		 */
		mac_cred_label_associate_fork(child_proc->p_ucred, child_proc);
#endif

		/*
		 * Propagate change of PID - may get new cred if auditing.
		 *
		 * NOTE: This has no effect in the vfork case, since
		 *	child_proc->task != current_task(), but we duplicate it
		 *	because this is probably, ultimately, wrong, since we
		 *	will be running in the "child" which is the parent task
		 *	with the wrong token until we get to the execve() or
		 *	_exit() call; a lot of "undefined" can happen before
		 *	that.
		 *
		 * <rdar://6640530> disallow everything but execve()/_exit()?
		 */
		set_security_token(child_proc);

		AUDIT_ARG(pid, child_proc->p_pid);

		AUDIT_SESSION_PROCNEW(child_proc->p_ucred);
// XXX END: wants to move to be common code (and safe)

		/*
		 * BORROW PARENT TASK, THREAD, UTHREAD FOR CHILD
		 *
		 * Note: this is where we would "push" state instead of setting
		 * it for nested vfork() support (see proc_vfork_end() for
		 * description of issues here).
		 */
		child_proc->task = parent_proc->task;

		child_proc->p_lflag  |= P_LINVFORK;
		child_proc->p_vforkact = parent_thread;
		child_proc->p_stat = SRUN;

		parent_uthread->uu_flag |= UT_VFORK;
		parent_uthread->uu_proc = child_proc;
		parent_uthread->uu_userstate = (void *)act_thread_csave();
		parent_uthread->uu_vforkmask = parent_uthread->uu_sigmask;

		/* temporarily drop thread-set-id state */
		if (parent_uthread->uu_flag & UT_SETUID) {
			parent_uthread->uu_flag |= UT_WASSETUID;
			parent_uthread->uu_flag &= ~UT_SETUID;
		}

		/* blow thread state information */
		/* XXX is this actually necessary, given syscall return? */
		thread_set_child(parent_thread, child_proc->p_pid);

		child_proc->p_acflag = AFORK;	/* forked but not exec'ed */

		/*
		 * Preserve synchronization semantics of vfork.  If
		 * waiting for child to exec or exit, set P_PPWAIT
		 * on child, and sleep on our proc (in case of exit).
		 */
		child_proc->p_lflag |= P_LPPWAIT;
		pinsertchild(parent_proc, child_proc);	/* set visible */

		break;

	case PROC_CREATE_SPAWN:
		/*
		 * A spawned process differs from a forked process in that
		 * the spawned process does not carry around the parent's
		 * baggage with regard to address space copying, dtrace,
		 * and so on.
		 */
		spawn = 1;

		/* FALLTHROUGH */

	case PROC_CREATE_FORK:
		/*
		 * When we clone the parent process, we are going to inherit
		 * its task attributes and memory, since when we fork, we
		 * will, in effect, create a duplicate of it, with only minor
		 * differences.  Contrarily, spawned processes do not inherit.
		 */
		if ((child_thread = cloneproc(parent_proc->task, parent_proc, spawn ? FALSE : TRUE)) == NULL) {
			/* Failed to create thread */
			err = EAGAIN;
			goto bad;
		}

		/* copy current thread state into the child thread (only for fork) */
		if (!spawn) {
			thread_dup(child_thread);
		}

		/* child_proc = child_thread->task->proc; */
		child_proc = (proc_t)(get_bsdtask_info(get_threadtask(child_thread)));

// XXX BEGIN: wants to move to be common code (and safe)
#if CONFIG_MACF
		/*
		 * allow policies to associate the credential/label that
		 * we referenced from the parent ... with the child
		 * JMM - this really isn't safe, as we can drop that
		 *       association without informing the policy in other
		 *       situations (keep long enough to get policies changed)
		 */
		mac_cred_label_associate_fork(child_proc->p_ucred, child_proc);
#endif

		/*
		 * Propagate change of PID - may get new cred if auditing.
		 *
		 * NOTE: This has no effect in the vfork case, since
		 *	child_proc->task != current_task(), but we duplicate it
		 *	because this is probably, ultimately, wrong, since we
		 *	will be running in the "child" which is the parent task
		 *	with the wrong token until we get to the execve() or
		 *	_exit() call; a lot of "undefined" can happen before
		 *	that.
		 *
		 * <rdar://6640530> disallow everything but execve()/_exit()?
		 */
		set_security_token(child_proc);

		AUDIT_ARG(pid, child_proc->p_pid);

		AUDIT_SESSION_PROCNEW(child_proc->p_ucred);
// XXX END: wants to move to be common code (and safe)

		/*
		 * Blow thread state information; this is what gives the child
		 * process its "return" value from a fork() call.
		 *
		 * Note: this should probably move to fork() proper, since it
		 * is not relevant to spawn, and the value won't matter
		 * until we resume the child there.  If you are in here
		 * refactoring code, consider doing this at the same time.
		 */
		thread_set_child(child_thread, child_proc->p_pid);

		child_proc->p_acflag = AFORK;	/* forked but not exec'ed */

// <rdar://6598155> dtrace code cleanup needed
#if CONFIG_DTRACE
		/*
		 * This code applies to new processes who are copying the task
		 * and thread state and address spaces of their parent process.
		 */
		if (!spawn) {
// <rdar://6598155> call dtrace specific function here instead of all this...
		/*
		 * APPLE NOTE: Solaris does a sprlock() and drops the
		 * proc_lock here. We're cheating a bit and only taking
		 * the p_dtrace_sprlock lock. A full sprlock would
		 * task_suspend the parent.
		 */
		lck_mtx_lock(&parent_proc->p_dtrace_sprlock);

		/*
		 * Remove all DTrace tracepoints from the child process. We
		 * need to do this _before_ duplicating USDT providers since
		 * any associated probes may be immediately enabled.
		 */
		if (parent_proc->p_dtrace_count > 0) {
			dtrace_fasttrap_fork(parent_proc, child_proc);
		}

		lck_mtx_unlock(&parent_proc->p_dtrace_sprlock);

		/*
		 * Duplicate any lazy dof(s). This must be done while NOT
		 * holding the parent sprlock! Lock ordering is
		 * dtrace_dof_mode_lock, then sprlock.  It is imperative we
		 * always call dtrace_lazy_dofs_duplicate, rather than null
		 * check and call if !NULL. If we NULL test, during lazy dof
		 * faulting we can race with the faulting code and proceed
		 * from here to beyond the helpers copy. The lazy dof
		 * faulting will then fail to copy the helpers to the child
		 * process.
		 */
		dtrace_lazy_dofs_duplicate(parent_proc, child_proc);
		
		/*
		 * Duplicate any helper actions and providers. The SFORKING
		 * we set above informs the code to enable USDT probes that
		 * sprlock() may fail because the child is being forked.
		 */
		/*
		 * APPLE NOTE: As best I can tell, Apple's sprlock() equivalent
		 * never fails to find the child. We do not set SFORKING.
		 */
		if (parent_proc->p_dtrace_helpers != NULL && dtrace_helpers_fork) {
			(*dtrace_helpers_fork)(parent_proc, child_proc);
		}

		}
#endif	/* CONFIG_DTRACE */

		break;

	default:
		panic("fork1 called with unknown kind %d", kind);
		break;
	}


	/* return the thread pointer to the caller */
	*child_threadp = child_thread;

bad:
	/*
	 * In the error case, we return a 0 value for the returned pid (but
	 * it is ignored in the trampoline due to the error return); this
	 * is probably not necessary.
	 */
	if (err) {
		(void)chgproccnt(uid, -1);
	}

	return (err);
}
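The PROC_CREATE_VFORK notes above translate directly into the user-space contract: the child borrows the parent's task/thread/uthread, so it may only call execve() or _exit(). A minimal sketch:

#include <sys/wait.h>
#include <stdlib.h>
#include <unistd.h>

int
main(void)
{
	/* The child runs on the parent's borrowed thread (UT_VFORK);
	 * the parent sleeps under P_LPPWAIT until execve()/_exit(). */
	pid_t pid = vfork();
	if (pid == 0) {
		execl("/bin/echo", "echo", "child", (char *)NULL);
		_exit(127);	/* only _exit() is safe after a failed exec */
	}
	if (pid == -1)
		return 1;

	int status;
	waitpid(pid, &status, 0);
	return WIFEXITED(status) ? WEXITSTATUS(status) : 1;
}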
Example #18
0
/*
 *	Routine:	macx_swapon
 *	Function:
 *		Syscall interface to add a file to backing store
 */
int
macx_swapon(
    char 	*filename,
    int	flags,
    long	size,
    long	priority)
{
    struct vnode		*vp = 0;
    struct nameidata 	nd, *ndp;
    struct proc		*p =  current_proc();
    pager_file_t		pf;
    register int		error;
    kern_return_t		kr;
    mach_port_t		backing_store;
    memory_object_default_t	default_pager;
    int			i;
    boolean_t		funnel_state;

    struct vattr	vattr;

    AUDIT_MACH_SYSCALL_ENTER(AUE_SWAPON);
    AUDIT_ARG(value, priority);

    funnel_state = thread_funnel_set(kernel_flock, TRUE);
    ndp = &nd;

    if ((error = suser(p->p_ucred, &p->p_acflag)))
        goto swapon_bailout;

    if(default_pager_init_flag == 0) {
        start_def_pager(NULL);
        default_pager_init_flag = 1;
    }

    /*
     * Get a vnode for the paging area.
     */
    NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF | AUDITVNPATH1, UIO_USERSPACE,
           filename, p);

    if ((error = namei(ndp)))
        goto swapon_bailout;
    vp = ndp->ni_vp;

    if (vp->v_type != VREG) {
        error = EINVAL;
        VOP_UNLOCK(vp, 0, p);
        goto swapon_bailout;
    }
    UBCINFOCHECK("macx_swapon", vp);

    if ((error = VOP_GETATTR(vp, &vattr, p->p_ucred, p))) {
        VOP_UNLOCK(vp, 0, p);
        goto swapon_bailout;
    }

    if (vattr.va_size < (u_quad_t)size) {
        vattr_null(&vattr);
        vattr.va_size = (u_quad_t)size;
        error = VOP_SETATTR(vp, &vattr, p->p_ucred, p);
        if (error) {
            VOP_UNLOCK(vp, 0, p);
            goto swapon_bailout;
        }
    }

    /* add new backing store to list; stop before reading past the table */
    i = 0;
    while (i < MAX_BACKING_STORE && bs_port_table[i].vp != 0)
        i++;
    if(i == MAX_BACKING_STORE) {
        error = ENOMEM;
        VOP_UNLOCK(vp, 0, p);
        goto swapon_bailout;
    }

    /* remember the vnode. This vnode has namei() reference */
    bs_port_table[i].vp = vp;

    /*
     * Look to see if we are already paging to this file.
     */
    /* make certain the copy send of kernel call will work */
    default_pager = MEMORY_OBJECT_DEFAULT_NULL;
    kr = host_default_memory_manager(host_priv_self(), &default_pager, 0);
    if(kr != KERN_SUCCESS) {
        error = EAGAIN;
        VOP_UNLOCK(vp, 0, p);
        bs_port_table[i].vp = 0;
        goto swapon_bailout;
    }

    kr = default_pager_backing_store_create(default_pager,
                                            -1, /* default priority */
                                            0, /* default cluster size */
                                            &backing_store);
    memory_object_default_deallocate(default_pager);

    if(kr != KERN_SUCCESS) {
        error = ENOMEM;
        VOP_UNLOCK(vp, 0, p);
        bs_port_table[i].vp = 0;
        goto swapon_bailout;
    }

    /*
     * NOTE: we are able to supply PAGE_SIZE here instead of
     *	an actual record size or block number because:
     *	a: we do not support offsets from the beginning of the
     *		file (allowing for non page size/record modulo offsets).
     *	b: because all paging will be done modulo page size
     */

    VOP_UNLOCK(vp, 0, p);
    kr = default_pager_add_file(backing_store, vp, PAGE_SIZE,
                                ((int)vattr.va_size)/PAGE_SIZE);
    if(kr != KERN_SUCCESS) {
        bs_port_table[i].vp = 0;
        if(kr == KERN_INVALID_ARGUMENT)
            error = EINVAL;
        else
            error = ENOMEM;
        goto swapon_bailout;
    }
    bs_port_table[i].bs = (void *)backing_store;
    error = 0;
    if (!ubc_hold(vp))
        panic("macx_swapon: hold");

    /* Mark this vnode as being used for swapfile */
    SET(vp->v_flag, VSWAP);

    ubc_setcred(vp, p);

    /*
     * take an extra reference on the vnode to keep
     * vnreclaim() away from this vnode.
     */
    VREF(vp);

    /* Hold on to the namei  reference to the paging file vnode */
    vp = 0;

swapon_bailout:
    if (vp) {
        vrele(vp);
    }
    (void) thread_funnel_set(kernel_flock, FALSE);
    AUDIT_MACH_SYSCALL_EXIT(error);
    return(error);
}
Example #19
0
/*
 *	Routine:	macx_swapon
 *	Function:
 *		Syscall interface to add a file to backing store
 */
int
macx_swapon(
	struct macx_swapon_args *args)
{
	int			size = args->size;
	vnode_t			vp = (vnode_t)NULL; 
	struct nameidata 	nd, *ndp;
	register int		error;
	kern_return_t		kr;
	mach_port_t		backing_store;
	memory_object_default_t	default_pager;
	int			i;
	boolean_t		funnel_state;
	off_t			file_size;
	vfs_context_t		ctx = vfs_context_current();
	struct proc		*p =  current_proc();
	int			dp_cluster_size;


	AUDIT_MACH_SYSCALL_ENTER(AUE_SWAPON);
	AUDIT_ARG(value32, args->priority);

	funnel_state = thread_funnel_set(kernel_flock, TRUE);
	ndp = &nd;

	if ((error = suser(kauth_cred_get(), 0)))
		goto swapon_bailout;

	/*
	 * Get a vnode for the paging area.
	 */
	NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF | AUDITVNPATH1,
	       ((IS_64BIT_PROCESS(p)) ? UIO_USERSPACE64 : UIO_USERSPACE32),
	       (user_addr_t) args->filename, ctx);

	if ((error = namei(ndp)))
		goto swapon_bailout;
	nameidone(ndp);
	vp = ndp->ni_vp;

	if (vp->v_type != VREG) {
		error = EINVAL;
		goto swapon_bailout;
	}

	/* get file size */
	if ((error = vnode_size(vp, &file_size, ctx)) != 0)
		goto swapon_bailout;
#if CONFIG_MACF
	vnode_lock(vp);
	error = mac_system_check_swapon(vfs_context_ucred(ctx), vp);
	vnode_unlock(vp);
	if (error)
		goto swapon_bailout;
#endif

	/* resize to desired size if it's too small */
	if ((file_size < (off_t)size) && ((error = vnode_setsize(vp, (off_t)size, 0, ctx)) != 0))
		goto swapon_bailout;

	if (default_pager_init_flag == 0) {
		start_def_pager(NULL);
		default_pager_init_flag = 1;
	}

	/* add new backing store to list; stop before reading past the table */
	i = 0;
	while (i < MAX_BACKING_STORE && bs_port_table[i].vp != 0)
		i++;
	if(i == MAX_BACKING_STORE) {
	   	error = ENOMEM;
		goto swapon_bailout;
	}

	/* remember the vnode. This vnode has namei() reference */
	bs_port_table[i].vp = vp;
	
	/*
	 * Look to see if we are already paging to this file.
	 */
	/* make certain the copy send of kernel call will work */
	default_pager = MEMORY_OBJECT_DEFAULT_NULL;
	kr = host_default_memory_manager(host_priv_self(), &default_pager, 0);
	if(kr != KERN_SUCCESS) {
	   error = EAGAIN;
	   bs_port_table[i].vp = 0;
	   goto swapon_bailout;
	}

	if (vp->v_mount->mnt_kern_flag & MNTK_SSD) {
		/*
		 * keep the cluster size small since the
		 * seek cost is effectively 0 which means
		 * we don't care much about fragmentation
		 */
		dp_isssd = TRUE;
		dp_cluster_size = 2 * PAGE_SIZE;
	} else {
		/*
		 * use the default cluster size
		 */
		dp_isssd = FALSE;
		dp_cluster_size = 0;
	}
	kr = default_pager_backing_store_create(default_pager, 
					-1, /* default priority */
					dp_cluster_size,
					&backing_store);
	memory_object_default_deallocate(default_pager);

	if(kr != KERN_SUCCESS) {
	   error = ENOMEM;
	   bs_port_table[i].vp = 0;
	   goto swapon_bailout;
	}

	/* Mark this vnode as being used for swapfile */
	vnode_lock_spin(vp);
	SET(vp->v_flag, VSWAP);
	vnode_unlock(vp);

	/*
	 * NOTE: we are able to supply PAGE_SIZE here instead of
	 *	an actual record size or block number because:
	 *	a: we do not support offsets from the beginning of the
	 *		file (allowing for non page size/record modulo offsets).
	 *	b: because all paging will be done modulo page size
	 */

	kr = default_pager_add_file(backing_store, (vnode_ptr_t) vp,
				PAGE_SIZE, (int)(file_size/PAGE_SIZE));
	if(kr != KERN_SUCCESS) {
	   bs_port_table[i].vp = 0;
	   if(kr == KERN_INVALID_ARGUMENT)
		error = EINVAL;
	   else 
		error = ENOMEM;

	   /* This vnode is not to be used for swapfile */
	   vnode_lock_spin(vp);
	   CLR(vp->v_flag, VSWAP);
	   vnode_unlock(vp);

	   goto swapon_bailout;
	}
	bs_port_table[i].bs = (void *)backing_store;
	error = 0;

	ubc_setthreadcred(vp, p, current_thread());

	/*
	 * take a long term reference on the vnode to keep
	 * vnreclaim() away from this vnode.
	 */
	vnode_ref(vp);

swapon_bailout:
	if (vp) {
		vnode_put(vp);
	}
	(void) thread_funnel_set(kernel_flock, FALSE);
	AUDIT_MACH_SYSCALL_EXIT(error);
	return(error);
}
Example #20
0
/*
 *	Routine:	task_for_pid
 *	Purpose:
 *		Get the task port for another "process", named by its
 *		process ID on the same host as "target_task".
 *
 *		Only permitted to privileged processes, or processes
 *		with the same user ID.
 *
 * XXX This should be a BSD system call, not a Mach trap!!!
 */
kern_return_t
task_for_pid(
	struct task_for_pid_args *args)
{
	mach_port_name_t	target_tport = args->target_tport;
	int			pid = args->pid;
	user_addr_t		task_addr = args->t;
	struct uthread		*uthread;
	proc_t 			p = PROC_NULL;
	task_t			t1 = TASK_NULL;
	mach_port_name_t	tret = MACH_PORT_NULL;
 	ipc_port_t 		tfpport;
	void * sright;
	int error = 0;

	AUDIT_MACH_SYSCALL_ENTER(AUE_TASKFORPID);
	AUDIT_ARG(pid, pid);
	AUDIT_ARG(mach_port1, target_tport);

#if defined(SECURE_KERNEL)
	if (0 == pid) {
		(void) copyout((char *)&t1, task_addr, sizeof(mach_port_name_t));
		AUDIT_MACH_SYSCALL_EXIT(KERN_FAILURE);
		return(KERN_FAILURE);
	}
#endif

	t1 = port_name_to_task(target_tport);
	if (t1 == TASK_NULL) {
		(void) copyout((char *)&t1, task_addr, sizeof(mach_port_name_t));
		AUDIT_MACH_SYSCALL_EXIT(KERN_FAILURE);
		return(KERN_FAILURE);
	} 


	/*
	 * Delayed binding of thread credential to process credential, if we
	 * are not running with an explicitly set thread credential.
	 */
	uthread = get_bsdthread_info(current_thread());
	kauth_cred_uthread_update(uthread, current_proc());

	p = proc_find(pid);
	AUDIT_ARG(process, p);

	if (!(task_for_pid_posix_check(p))) {
		error = KERN_FAILURE;
		goto tfpout;
	}

	if (p->task != TASK_NULL) {
		/* If we aren't root and target's task access port is set... */
		if (!kauth_cred_issuser(kauth_cred_get()) &&
			p != current_proc() &&
			(task_get_task_access_port(p->task, &tfpport) == 0) &&
			(tfpport != IPC_PORT_NULL)) {

			if (tfpport == IPC_PORT_DEAD) {
				error = KERN_PROTECTION_FAILURE;
				goto tfpout;
			}

			/* Call up to the task access server */
			error = check_task_access(tfpport, proc_selfpid(), kauth_getgid(), pid);

			if (error != MACH_MSG_SUCCESS) {
				if (error == MACH_RCV_INTERRUPTED)
					error = KERN_ABORTED;
				else
					error = KERN_FAILURE;
				goto tfpout;
			}
		}
#if CONFIG_MACF
		error = mac_proc_check_get_task(kauth_cred_get(), p);
		if (error) {
			error = KERN_FAILURE;
			goto tfpout;
		}
#endif

		/* Grant task port access */
		task_reference(p->task);
		sright = (void *) convert_task_to_port(p->task);
		tret = ipc_port_copyout_send(
				sright, 
				get_task_ipcspace(current_task()));
	} 
	error = KERN_SUCCESS;

tfpout:
	task_deallocate(t1);
	AUDIT_ARG(mach_port2, tret);
	(void) copyout((char *) &tret, task_addr, sizeof(mach_port_name_t));
	if (p != PROC_NULL)
		proc_rele(p);
	AUDIT_MACH_SYSCALL_EXIT(error);
	return(error);
}
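A hedged user-space sketch; the trap prototype is assumed from <mach/mach_traps.h>. Per the checks above, the caller must be root or the same user, pass any task access port check, and satisfy MACF.

#include <mach/mach.h>
#include <mach/mach_error.h>
#include <stdio.h>

int
get_task_port(int pid, mach_port_name_t *port)
{
	kern_return_t kr = task_for_pid(mach_task_self(), pid, port);
	if (kr != KERN_SUCCESS) {
		fprintf(stderr, "task_for_pid(%d): %s\n",
		    pid, mach_error_string(kr));
		return -1;
	}
	return 0;
}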
Example #21
0
/*
 * iocounts in:
 * 	--One on ni_vp.  One on ni_dvp if there is more path, or we didn't come through the
 * 	cache, or we came through the cache and the caller doesn't want the parent.
 *
 * iocounts out:
 *	--Leaves us in the correct state for the next step, whatever that might be.
 *	--If we find a symlink, returns with iocounts on both ni_vp and ni_dvp.
 *	--If we are to look up another component, then we have an iocount on ni_vp and
 *	nothing else.  
 *	--If we are done, returns an iocount on ni_vp, and possibly on ni_dvp depending on nameidata flags.
 *	--In the event of an error, may return with ni_dvp NULL'ed out (in which case, iocount
 *	was dropped).
 */
int		
lookup_handle_found_vnode(struct nameidata *ndp, struct componentname *cnp, int rdonly, 
		int vbusyflags, int *keep_going, int nc_generation,
		int wantparent, int atroot, vfs_context_t ctx)
{
	vnode_t dp;
	int error;
	char *cp;

	dp = ndp->ni_vp;
	*keep_going = 0;

	if (ndp->ni_vp == NULLVP) {
		panic("NULL ni_vp in %s\n", __FUNCTION__);
	}

	if (atroot) {
		goto nextname;
	}

#if CONFIG_TRIGGERS
	if (dp->v_resolve) {
		error = vnode_trigger_resolve(dp, ndp, ctx);
		if (error) {
			goto out;
		}
	}
#endif /* CONFIG_TRIGGERS */

	/*
	 * Take into account any additional components consumed by
	 * the underlying filesystem.
	 */
	if (cnp->cn_consume > 0) {
		cnp->cn_nameptr += cnp->cn_consume;
		ndp->ni_next += cnp->cn_consume;
		ndp->ni_pathlen -= cnp->cn_consume;
		cnp->cn_consume = 0;
	} else {
		lookup_consider_update_cache(ndp->ni_dvp, dp, cnp, nc_generation);
	}

	/*
	 * Check to see if the vnode has been mounted on...
	 * if so find the root of the mounted file system.
	 * Updates ndp->ni_vp.
	 */
	error = lookup_traverse_mountpoints(ndp, cnp, dp, vbusyflags, ctx);
	dp = ndp->ni_vp;
	if (error) {
		goto out;
	}

#if CONFIG_MACF
	if (vfs_flags(vnode_mount(dp)) & MNT_MULTILABEL) {
		error = vnode_label(vnode_mount(dp), NULL, dp, NULL, 0, ctx);
		if (error)
			goto out;
	}
#endif

	/*
	 * Check for symbolic link
	 */
	if ((dp->v_type == VLNK) &&
	    ((cnp->cn_flags & FOLLOW) || (ndp->ni_flag & NAMEI_TRAILINGSLASH) || *ndp->ni_next == '/')) {
		cnp->cn_flags |= ISSYMLINK;
		*keep_going = 1;
		return (0);
	}

	/*
	 * Check for bogus trailing slashes.
	 */
	if ((ndp->ni_flag & NAMEI_TRAILINGSLASH)) {
		if (dp->v_type != VDIR) {
			error = ENOTDIR;
			goto out;
		}
		ndp->ni_flag &= ~(NAMEI_TRAILINGSLASH);
	} 
	
nextname:
	/*
	 * Not a symbolic link.  If more pathname,
	 * continue at next component, else return.
	 *
	 * Definitely have a dvp if there's another slash 
	 */
	if (*ndp->ni_next == '/') {
		cnp->cn_nameptr = ndp->ni_next + 1;
		ndp->ni_pathlen--;
		while (*cnp->cn_nameptr == '/') {
			cnp->cn_nameptr++;
			ndp->ni_pathlen--;
		}

		cp = cnp->cn_nameptr;
		vnode_put(ndp->ni_dvp);
		ndp->ni_dvp = NULLVP;

		if (*cp == '\0') {
			goto emptyname;
		}

		*keep_going = 1;
		return 0;
	}
				  
	/*
	 * Disallow directory write attempts on read-only file systems.
	 */
	if (rdonly &&
	    (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME)) {
		error = EROFS;
		goto out;
	}
	
	/* If SAVESTART is set, we should have a dvp */
	if (cnp->cn_flags & SAVESTART) {
	        /*
		 * note that we already hold a reference
		 * on both dp and ni_dvp, but for some reason
		 * can't get another one... in this case we
		 * need to do vnode_put on dp in 'out'
		 */
	        if ( (vnode_get(ndp->ni_dvp)) ) {
		        error = ENOENT;
			goto out;
		}
		ndp->ni_startdir = ndp->ni_dvp;
	}
	if (!wantparent && ndp->ni_dvp) {
		vnode_put(ndp->ni_dvp);
		ndp->ni_dvp = NULLVP;
	}

	if (cnp->cn_flags & AUDITVNPATH1)
		AUDIT_ARG(vnpath, dp, ARG_VNODE1);
	else if (cnp->cn_flags & AUDITVNPATH2)
		AUDIT_ARG(vnpath, dp, ARG_VNODE2);

#if NAMEDRSRCFORK
	/*
	 * Caller wants the resource fork.
	 */
	if ((cnp->cn_flags & CN_WANTSRSRCFORK) && (dp != NULLVP)) {
		error = lookup_handle_rsrc_fork(dp, ndp, cnp, wantparent, ctx);
		if (error != 0)
			goto out;

		dp = ndp->ni_vp;
	}
#endif
	if (kdebug_enable)
	        kdebug_lookup(dp, cnp);

	return 0;

emptyname:
	error = lookup_handle_emptyname(ndp, cnp, wantparent);
	if (error != 0) 
		goto out;

	return 0;
out:
	return error;

}
Example #22
0
kern_return_t
map_fd_funneled(
	int			fd,
	vm_object_offset_t	offset,
	vm_offset_t		*va,
	boolean_t		findspace,
	vm_size_t		size)
{
	kern_return_t	result;
	struct fileproc	*fp;
	struct vnode	*vp;
	void *	pager;
	vm_offset_t	map_addr=0;
	vm_size_t	map_size;
	int		err=0;
	vm_map_t	my_map;
	proc_t		p = current_proc();
	struct vnode_attr vattr;

	/*
	 *	Find the inode; verify that it's a regular file.
	 */

	err = fp_lookup(p, fd, &fp, 0);
	if (err)
		return(err);
	
	if (fp->f_fglob->fg_type != DTYPE_VNODE){
		err = KERN_INVALID_ARGUMENT;
		goto bad;
	}

	if (!(fp->f_fglob->fg_flag & FREAD)) {
		err = KERN_PROTECTION_FAILURE;
		goto bad;
	}

	vp = (struct vnode *)fp->f_fglob->fg_data;
	err = vnode_getwithref(vp);
	if(err != 0) 
		goto bad;

	if (vp->v_type != VREG) {
		(void)vnode_put(vp);
		err = KERN_INVALID_ARGUMENT;
		goto bad;
	}

	AUDIT_ARG(vnpath, vp, ARG_VNODE1);

	/*
	 * POSIX: mmap needs to update access time for mapped files
	 */
	if ((vnode_vfsvisflags(vp) & MNT_NOATIME) == 0) {
		VATTR_INIT(&vattr);
		nanotime(&vattr.va_access_time);
		VATTR_SET_ACTIVE(&vattr, va_access_time);
		vnode_setattr(vp, &vattr, vfs_context_current());
	}
	
	if (offset & PAGE_MASK_64) {
		printf("map_fd: file offset not page aligned(%d : %s)\n",p->p_pid, p->p_comm);
		(void)vnode_put(vp);
		err = KERN_INVALID_ARGUMENT;
		goto bad;
	}
	map_size = round_page(size);

	/*
	 * Allow user to map in a zero length file.
	 */
	if (size == 0) {
		(void)vnode_put(vp);
		err = KERN_SUCCESS;
		goto bad;
	}
	/*
	 *	Map in the file.
	 */
	pager = (void *)ubc_getpager(vp);
	if (pager == NULL) {
		(void)vnode_put(vp);
		err = KERN_FAILURE;
		goto bad;
	}


	my_map = current_map();

	result = vm_map_64(
			my_map,
			&map_addr, map_size, (vm_offset_t)0, 
			VM_FLAGS_ANYWHERE, pager, offset, TRUE,
			VM_PROT_DEFAULT, VM_PROT_ALL,
			VM_INHERIT_DEFAULT);
	if (result != KERN_SUCCESS) {
		(void)vnode_put(vp);
		err = result;
		goto bad;
	}


	if (!findspace) {
		vm_offset_t	dst_addr;
		vm_map_copy_t	tmp;

		if (copyin(CAST_USER_ADDR_T(va), &dst_addr, sizeof (dst_addr))	||
					trunc_page_32(dst_addr) != dst_addr) {
			(void) vm_map_remove(
					my_map,
					map_addr, map_addr + map_size,
					VM_MAP_NO_FLAGS);
			(void)vnode_put(vp);
			err = KERN_INVALID_ADDRESS;
			goto bad;
		}

		result = vm_map_copyin(my_map, (vm_map_address_t)map_addr,
				       (vm_map_size_t)map_size, TRUE, &tmp);
		if (result != KERN_SUCCESS) {
			
			(void) vm_map_remove(my_map, vm_map_trunc_page(map_addr),
					vm_map_round_page(map_addr + map_size),
					VM_MAP_NO_FLAGS);
			(void)vnode_put(vp);
			err = result;
			goto bad;
		}

		result = vm_map_copy_overwrite(my_map,
					(vm_map_address_t)dst_addr, tmp, FALSE);
		if (result != KERN_SUCCESS) {
			vm_map_copy_discard(tmp);
			(void)vnode_put(vp);
			err = result;
			goto bad;
		}
	} else {
		if (copyout(&map_addr, CAST_USER_ADDR_T(va), sizeof (map_addr))) {
			(void) vm_map_remove(my_map, vm_map_trunc_page(map_addr),
					vm_map_round_page(map_addr + map_size),
					VM_MAP_NO_FLAGS);
			(void)vnode_put(vp);
			err = KERN_INVALID_ADDRESS;
			goto bad;
		}
	}

	ubc_setthreadcred(vp, current_proc(), current_thread());
	(void)ubc_map(vp, (PROT_READ | PROT_EXEC));
	(void)vnode_put(vp);
	err = 0;
bad:
	fp_drop(p, fd, fp, 0);
	return (err);
}
Example #23
0
/*
 * Convert a pathname into a pointer to a locked inode.
 *
 * The FOLLOW flag is set when symbolic links are to be followed
 * when they occur at the end of the name translation process.
 * Symbolic links are always followed for all other pathname
 * components other than the last.
 *
 * The segflg defines whether the name is to be copied from user
 * space or kernel space.
 *
 * Overall outline of namei:
 *
 *	copy in name
 *	get starting directory
 *	while (!done && !error) {
 *		call lookup to search path.
 *		if symbolic link, massage name in buffer and continue
 *	}
 *
 * Returns:	0			Success
 *		ENOENT			No such file or directory
 *		ELOOP			Too many levels of symbolic links
 *		ENAMETOOLONG		Filename too long
 *		copyinstr:EFAULT	Bad address
 *		copyinstr:ENAMETOOLONG	Filename too long
 *		lookup:EBADF		Bad file descriptor
 *		lookup:EROFS
 *		lookup:EACCES
 *		lookup:EPERM
 *		lookup:ERECYCLE	 vnode was recycled from underneath us in lookup.
 *						 This means we should re-drive lookup from this point.
 *		lookup: ???
 *		VNOP_READLINK:???
 */
int
namei(struct nameidata *ndp)
{
	struct filedesc *fdp;	/* pointer to file descriptor state */
	struct vnode *dp;	/* the directory we are searching */
	struct vnode *usedvp = ndp->ni_dvp;	/* store pointer to vp in case we
						 * must loop due to heavy vnode pressure */
	u_long cnpflags = ndp->ni_cnd.cn_flags; /* store in case we have to restore after loop */
	int error;
	struct componentname *cnp = &ndp->ni_cnd;
	vfs_context_t ctx = cnp->cn_context;
	proc_t p = vfs_context_proc(ctx);
#if CONFIG_AUDIT
/* XXX ut should be from context */
	uthread_t ut = (struct uthread *)get_bsdthread_info(current_thread());
#endif

	fdp = p->p_fd;

#if DIAGNOSTIC
	if (!vfs_context_ucred(ctx) || !p)
		panic ("namei: bad cred/proc");
	if (cnp->cn_nameiop & (~OPMASK))
		panic ("namei: nameiop contaminated with flags");
	if (cnp->cn_flags & OPMASK)
		panic ("namei: flags contaminated with nameiops");
#endif

	/*
	 * A compound VNOP found something that needs further processing:
	 * either a trigger vnode, a covered directory, or a symlink.
	 */
	if (ndp->ni_flag & NAMEI_CONTLOOKUP) {
		int rdonly, vbusyflags, keep_going, wantparent;

		rdonly = cnp->cn_flags & RDONLY;
		vbusyflags = ((cnp->cn_flags & CN_NBMOUNTLOOK) != 0) ? LK_NOWAIT : 0;
		keep_going = 0;
		wantparent = cnp->cn_flags & (LOCKPARENT | WANTPARENT);

		ndp->ni_flag &= ~(NAMEI_CONTLOOKUP);

		error = lookup_handle_found_vnode(ndp, &ndp->ni_cnd, rdonly, vbusyflags, 
				&keep_going, ndp->ni_ncgeneration, wantparent, 0, ctx);
		if (error)
			goto out_drop;
		if (keep_going) {
			if ((cnp->cn_flags & ISSYMLINK) == 0) {
				panic("We need to keep going on a continued lookup, but for vp type %d (tag %d)\n", ndp->ni_vp->v_type, ndp->ni_vp->v_tag);
			}
			goto continue_symlink;
		}

		return 0;

	}

vnode_recycled:

	/*
	 * Get a buffer for the name to be translated, and copy the
	 * name into the buffer.
	 */
	if ((cnp->cn_flags & HASBUF) == 0) {
		cnp->cn_pnbuf = ndp->ni_pathbuf;
		cnp->cn_pnlen = PATHBUFLEN;
	}
#if LP64_DEBUG
	if ((UIO_SEG_IS_USER_SPACE(ndp->ni_segflg) == 0)
		&& (ndp->ni_segflg != UIO_SYSSPACE)
		&& (ndp->ni_segflg != UIO_SYSSPACE32)) {
		panic("%s :%d - invalid ni_segflg\n", __FILE__, __LINE__); 
	}
#endif /* LP64_DEBUG */

retry_copy:
	if (UIO_SEG_IS_USER_SPACE(ndp->ni_segflg)) {
		error = copyinstr(ndp->ni_dirp, cnp->cn_pnbuf,
			    cnp->cn_pnlen, (size_t *)&ndp->ni_pathlen);
	} else {
		error = copystr(CAST_DOWN(void *, ndp->ni_dirp), cnp->cn_pnbuf,
			    cnp->cn_pnlen, (size_t *)&ndp->ni_pathlen);
	}
	if (error == ENAMETOOLONG && !(cnp->cn_flags & HASBUF)) {
		MALLOC_ZONE(cnp->cn_pnbuf, caddr_t, MAXPATHLEN, M_NAMEI, M_WAITOK);
		if (cnp->cn_pnbuf == NULL) {
			error = ENOMEM;
			goto error_out;
		}

		cnp->cn_flags |= HASBUF;
		cnp->cn_pnlen = MAXPATHLEN;
		
		goto retry_copy;
	}
	if (error)
	        goto error_out;

#if CONFIG_VOLFS
 	/*
	 * Check for legacy volfs style pathnames.
	 *
	 * For compatibility reasons we currently allow these paths,
	 * but future versions of the OS may not support them.
	 */
	if (ndp->ni_pathlen >= VOLFS_MIN_PATH_LEN &&
	    cnp->cn_pnbuf[0] == '/' &&
	    cnp->cn_pnbuf[1] == '.' &&
	    cnp->cn_pnbuf[2] == 'v' &&
	    cnp->cn_pnbuf[3] == 'o' &&
	    cnp->cn_pnbuf[4] == 'l' &&
	    cnp->cn_pnbuf[5] == '/' ) {
		char * realpath;
		int realpath_err;
		/* Attempt to resolve a legacy volfs style pathname. */
		MALLOC_ZONE(realpath, caddr_t, MAXPATHLEN, M_NAMEI, M_WAITOK);
		if (realpath) {
			/* 
			 * We only error out on the ENAMETOOLONG cases where we know that
			 * vfs_getrealpath translation succeeded but the path could not fit into
			 * MAXPATHLEN characters.  In other failure cases, we may be dealing with a path
			 * that legitimately looks like /.vol/1234/567 and is not meant to be translated
			 */
			if ((realpath_err= vfs_getrealpath(&cnp->cn_pnbuf[6], realpath, MAXPATHLEN, ctx))) {
				FREE_ZONE(realpath, MAXPATHLEN, M_NAMEI);
				if (realpath_err == ENOSPC || realpath_err == ENAMETOOLONG){
					error = ENAMETOOLONG;
					goto error_out;
				}
			} else {
				if (cnp->cn_flags & HASBUF) {
					FREE_ZONE(cnp->cn_pnbuf, cnp->cn_pnlen, M_NAMEI);
				}
				cnp->cn_pnbuf = realpath;
				cnp->cn_pnlen = MAXPATHLEN;
				ndp->ni_pathlen = strlen(realpath) + 1;
				cnp->cn_flags |= HASBUF | CN_VOLFSPATH;
			}
		}
	}
#endif /* CONFIG_VOLFS */

#if CONFIG_AUDIT
	/* If we are auditing the kernel pathname, save the user pathname */
	if (cnp->cn_flags & AUDITVNPATH1)
		AUDIT_ARG(upath, ut->uu_cdir, cnp->cn_pnbuf, ARG_UPATH1); 
	if (cnp->cn_flags & AUDITVNPATH2)
		AUDIT_ARG(upath, ut->uu_cdir, cnp->cn_pnbuf, ARG_UPATH2); 
#endif /* CONFIG_AUDIT */

	/*
	 * Do not allow empty pathnames
	 */
	if (*cnp->cn_pnbuf == '\0') {
		error = ENOENT;
		goto error_out;
	}
	ndp->ni_loopcnt = 0;

	/*
	 * determine the starting point for the translation.
	 */
	if ((ndp->ni_rootdir = fdp->fd_rdir) == NULLVP) {
	        if ( !(fdp->fd_flags & FD_CHROOT))
		        ndp->ni_rootdir = rootvnode;
	}
	cnp->cn_nameptr = cnp->cn_pnbuf;

	ndp->ni_usedvp = NULLVP;

	if (*(cnp->cn_nameptr) == '/') {
	        while (*(cnp->cn_nameptr) == '/') {
		        cnp->cn_nameptr++;
			ndp->ni_pathlen--;
		}
		dp = ndp->ni_rootdir;
	} else if (cnp->cn_flags & USEDVP) {
	        dp = ndp->ni_dvp;
		ndp->ni_usedvp = dp;
	} else
	        dp = vfs_context_cwd(ctx);

	if (dp == NULLVP || (dp->v_lflag & VL_DEAD)) {
	        error = ENOENT;
		goto error_out;
	}
	ndp->ni_dvp = NULLVP;
	ndp->ni_vp  = NULLVP;

	for (;;) {
		ndp->ni_startdir = dp;

		if ( (error = lookup(ndp)) ) {
			goto error_out;
		}
		/*
		 * Check for symbolic link
		 */
		if ((cnp->cn_flags & ISSYMLINK) == 0) {
			return (0);
		}

continue_symlink:
		/* Gives us a new path to process, and a starting dir */
		error = lookup_handle_symlink(ndp, &dp, ctx);
		if (error != 0) {
			break;
		}
	}
	/*
	 * only come here if we fail to handle a SYMLINK...
	 * if either ni_dvp or ni_vp is non-NULL, then
	 * we need to drop the iocount that was picked
	 * up in the lookup routine
	 */
out_drop:
	if (ndp->ni_dvp)
	        vnode_put(ndp->ni_dvp);
	if (ndp->ni_vp)
	        vnode_put(ndp->ni_vp);
 error_out:
	if ( (cnp->cn_flags & HASBUF) ) {
		cnp->cn_flags &= ~HASBUF;
		FREE_ZONE(cnp->cn_pnbuf, cnp->cn_pnlen, M_NAMEI);
	}
	cnp->cn_pnbuf = NULL;
	ndp->ni_vp = NULLVP;
	ndp->ni_dvp = NULLVP;
	if (error == ERECYCLE){
		/* vnode was recycled underneath us. re-drive lookup to start at 
		   the beginning again, since recycling invalidated last lookup*/
		ndp->ni_cnd.cn_flags = cnpflags;
		ndp->ni_dvp = usedvp;
		goto vnode_recycled;
	}


	return (error);
}
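Callers drive the loop above through the NDINIT/namei/nameidone triple; the macx_swapon() examples earlier follow exactly this pattern. A condensed sketch of the idiom (kernel context assumed, NDINIT arguments as in those examples):

static int
lookup_user_path(user_addr_t user_path, vnode_t *vpp)
{
	struct nameidata nd;
	int error;

	NDINIT(&nd, LOOKUP, FOLLOW | AUDITVNPATH1, UIO_USERSPACE,
	    user_path, vfs_context_current());
	if ((error = namei(&nd)))
		return error;		/* ERECYCLE is re-driven inside namei() */
	nameidone(&nd);
	*vpp = nd.ni_vp;		/* returned with an iocount held */
	return 0;			/* caller must vnode_put(*vpp) */
}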
Example #24
0
kern_return_t
task_name_for_pid(
	struct task_name_for_pid_args *args)
{
	mach_port_name_t	target_tport = args->target_tport;
	int			pid = args->pid;
	user_addr_t		task_addr = args->t;
	proc_t		p = PROC_NULL;
	task_t		t1;
	mach_port_name_t	tret;
	void * sright;
	int error = 0, refheld = 0;
	kauth_cred_t target_cred;

	AUDIT_MACH_SYSCALL_ENTER(AUE_TASKNAMEFORPID);
	AUDIT_ARG(pid, pid);
	AUDIT_ARG(mach_port1, target_tport);

	t1 = port_name_to_task(target_tport);
	if (t1 == TASK_NULL) {
		(void) copyout((char *)&t1, task_addr, sizeof(mach_port_name_t));
		AUDIT_MACH_SYSCALL_EXIT(KERN_FAILURE);
		return(KERN_FAILURE);
	} 

	p = proc_find(pid);
	if (p != PROC_NULL) {
		AUDIT_ARG(process, p);
		target_cred = kauth_cred_proc_ref(p);
		refheld = 1;

		if ((p->p_stat != SZOMB)
		    && ((current_proc() == p)
			|| kauth_cred_issuser(kauth_cred_get()) 
			|| ((kauth_cred_getuid(target_cred) == kauth_cred_getuid(kauth_cred_get())) && 
			    ((kauth_cred_getruid(target_cred) == kauth_getruid()))))) {

			if (p->task != TASK_NULL) {
				task_reference(p->task);
#if CONFIG_MACF
				error = mac_proc_check_get_task_name(kauth_cred_get(),  p);
				if (error) {
					task_deallocate(p->task);
					goto noperm;
				}
#endif
				sright = (void *)convert_task_name_to_port(p->task);
				tret = ipc_port_copyout_send(sright, 
						get_task_ipcspace(current_task()));
			} else
				tret  = MACH_PORT_NULL;

			AUDIT_ARG(mach_port2, tret);
			(void) copyout((char *)&tret, task_addr, sizeof(mach_port_name_t));
			task_deallocate(t1);
			error = KERN_SUCCESS;
			goto tnfpout;
		}
	}

#if CONFIG_MACF
noperm:
#endif
	task_deallocate(t1);
	tret = MACH_PORT_NULL;
	(void) copyout((char *) &tret, task_addr, sizeof(mach_port_name_t));
	error = KERN_FAILURE;
tnfpout:
	if (refheld != 0)
		kauth_cred_unref(&target_cred);
	if (p != PROC_NULL)
		proc_rele(p);
	AUDIT_MACH_SYSCALL_EXIT(error);
	return(error);
}
Example #25
0
/*
 * XXX Internally, we use VM_PROT_* somewhat interchangeably, but the correct
 * XXX usage is PROT_* from an interface perspective.  Thus the values of
 * XXX VM_PROT_* and PROT_* need to correspond.
 */
int
mmap(proc_t p, struct mmap_args *uap, user_addr_t *retval)
{
	/*
	 *	Map in special device (must be SHARED) or file
	 */
	struct fileproc *fp;
	register struct		vnode *vp;
	int			flags;
	int			prot, file_prot;
	int			err=0;
	vm_map_t		user_map;
	kern_return_t		result;
	mach_vm_offset_t	user_addr;
	mach_vm_size_t		user_size;
	vm_object_offset_t	pageoff;
	vm_object_offset_t	file_pos;
	int			alloc_flags=0;
	boolean_t		docow;
	vm_prot_t		maxprot;
	void 			*handle;
	vm_pager_t		pager;
	int 			mapanon=0;
	int 			fpref=0;
	int error =0;
	int fd = uap->fd;

	user_addr = (mach_vm_offset_t)uap->addr;
	user_size = (mach_vm_size_t) uap->len;

	AUDIT_ARG(addr, user_addr);
	AUDIT_ARG(len, user_size);
	AUDIT_ARG(fd, uap->fd);

	prot = (uap->prot & VM_PROT_ALL);
#if 3777787
	/*
	 * Since the hardware currently does not support writing without
	 * read-before-write, or execution-without-read, if the request is
	 * for write or execute access, we must imply read access as well;
	 * otherwise programs expecting this to work will fail to operate.
	 */
	if (prot & (VM_PROT_EXECUTE | VM_PROT_WRITE))
		prot |= VM_PROT_READ;
#endif	/* radar 3777787 */

	flags = uap->flags;
	vp = NULLVP;

	/*
	 * The vm code does not have prototypes, and the compiler doesn't do
	 * the right thing when you cast a 64-bit value and pass it in a
	 * function call. So here it is.
	 */
	file_pos = (vm_object_offset_t)uap->pos;


	/* make sure mapping fits into numeric range etc */
	if (file_pos + user_size > (vm_object_offset_t)-PAGE_SIZE_64)
		return (EINVAL);

	/*
	 * Align the file position to a page boundary,
	 * and save its page offset component.
	 */
	pageoff = (file_pos & PAGE_MASK);
	file_pos -= (vm_object_offset_t)pageoff;


	/* Adjust size for rounding (on both ends). */
	user_size += pageoff;			/* low end... */
	user_size = mach_vm_round_page(user_size);	/* hi end */


	/*
	 * Check for illegal addresses.  Watch out for address wrap... Note
	 * that VM_*_ADDRESS are not constants due to casts (argh).
	 */
	if (flags & MAP_FIXED) {
		/*
		 * The specified address must have the same remainder
		 * as the file offset taken modulo PAGE_SIZE, so it
		 * should be aligned after adjustment by pageoff.
		 */
		user_addr -= pageoff;
		if (user_addr & PAGE_MASK)
			return (EINVAL);
	}
#ifdef notyet
	/* Do not have APIs to get this info; need to wait till then. */
	/*
	 * XXX for non-fixed mappings where no hint is provided or
	 * the hint would fall in the potential heap space,
	 * place it after the end of the largest possible heap.
	 *
	 * There should really be a pmap call to determine a reasonable
	 * location.
	 */
	else if (addr < mach_vm_round_page(p->p_vmspace->vm_daddr + MAXDSIZ))
		addr = mach_vm_round_page(p->p_vmspace->vm_daddr + MAXDSIZ);

#endif

	alloc_flags = 0;

	if (flags & MAP_ANON) {
		/*
		 * Mapping blank space is trivial.  Use positive fds as the alias
		 * value for memory tracking. 
		 */
		if (fd != -1) {
			/*
			 * Use "fd" to pass (some) Mach VM allocation flags,
			 * (see the VM_FLAGS_* definitions).
			 */
			alloc_flags = fd & (VM_FLAGS_ALIAS_MASK |
					    VM_FLAGS_PURGABLE);
			if (alloc_flags != fd) {
				/* reject if there are any extra flags */
				return EINVAL;
			}
		}
			
		handle = NULL;
		maxprot = VM_PROT_ALL;
		file_pos = 0;
		mapanon = 1;
	} else {
		struct vnode_attr va;
		vfs_context_t ctx = vfs_context_current();

		/*
		 * Mapping file, get fp for validation. Obtain vnode and make
		 * sure it is of appropriate type.
		 */
		err = fp_lookup(p, fd, &fp, 0);
		if (err)
			return(err);
		fpref = 1;
		if (fp->f_fglob->fg_type == DTYPE_PSXSHM) {
			uap->addr = (user_addr_t)user_addr;
			uap->len = (user_size_t)user_size;
			uap->prot = prot;
			uap->flags = flags;
			uap->pos = file_pos;
			error = pshm_mmap(p, uap, retval, fp, (off_t)pageoff);
			goto bad;
		}

		if (fp->f_fglob->fg_type != DTYPE_VNODE) {
			error = EINVAL;
			goto bad;
		}
		vp = (struct vnode *)fp->f_fglob->fg_data;
		error = vnode_getwithref(vp);
		if (error != 0)
			goto bad;

		if (vp->v_type != VREG && vp->v_type != VCHR) {
			(void)vnode_put(vp);
			error = EINVAL;
			goto bad;
		}

		AUDIT_ARG(vnpath, vp, ARG_VNODE1);
		
		/*
		 * POSIX: mmap needs to update access time for mapped files
		 */
		if ((vnode_vfsvisflags(vp) & MNT_NOATIME) == 0) {
			VATTR_INIT(&va);
			nanotime(&va.va_access_time);
			VATTR_SET_ACTIVE(&va, va_access_time);
			vnode_setattr(vp, &va, ctx);
		}
		
		/*
		 * XXX hack to handle use of /dev/zero to map anon memory (ala
		 * SunOS).
		 */
		if (vp->v_type == VCHR || vp->v_type == VSTR) {
			(void)vnode_put(vp);
			error = ENODEV;
			goto bad;
		} else {
			/*
			 * Ensure that file and memory protections are
			 * compatible.  Note that we only worry about
			 * writability if mapping is shared; in this case,
			 * current and max prot are dictated by the open file.
			 * XXX use the vnode instead?  Problem is: what
			 * credentials do we use for determination? What if
			 * proc does a setuid?
			 */
			maxprot = VM_PROT_EXECUTE;	/* ??? */
			if (fp->f_fglob->fg_flag & FREAD)
				maxprot |= VM_PROT_READ;
			else if (prot & PROT_READ) {
				(void)vnode_put(vp);
				error = EACCES;
				goto bad;
			}
			/*
			 * If we are sharing potential changes (either via
			 * MAP_SHARED or via the implicit sharing of character
			 * device mappings), and we are trying to get write
			 * permission although we opened it without asking
			 * for it, bail out. 
			 */

			if ((flags & MAP_SHARED) != 0) {
				if ((fp->f_fglob->fg_flag & FWRITE) != 0) {
					/*
					 * check for write access
					 *
					 * Note that we already made this check when granting FWRITE
					 * against the file, so it seems redundant here.
					 */
					error = vnode_authorize(vp, NULL, KAUTH_VNODE_CHECKIMMUTABLE, ctx);

					/* if not granted for any reason, but we wanted it, bad */
					if ((prot & PROT_WRITE) && (error != 0)) {
						vnode_put(vp);
						goto bad;
					}

					/* if writable, remember */
					if (error == 0)
						maxprot |= VM_PROT_WRITE;

				} else if ((prot & PROT_WRITE) != 0) {
					(void)vnode_put(vp);
					error = EACCES;
					goto bad;
				}
			} else
				maxprot |= VM_PROT_WRITE;

			handle = (void *)vp;
#if CONFIG_MACF
			error = mac_file_check_mmap(vfs_context_ucred(ctx),
			    fp->f_fglob, prot, flags, &maxprot);
			if (error) {
				(void)vnode_put(vp);
				goto bad;
			}
#endif /* MAC */
		}
	}

	if (user_size == 0)  {
		if (!mapanon)
			(void)vnode_put(vp);
		error = 0;
		goto bad;
	}

	/*
	 *	We bend a little - round the start and end addresses
	 *	to the nearest page boundary.
	 */
	user_size = mach_vm_round_page(user_size);

	if (file_pos & PAGE_MASK_64) {
		if (!mapanon)
			(void)vnode_put(vp);
		error = EINVAL;
		goto bad;
	}

	user_map = current_map();

	if ((flags & MAP_FIXED) == 0) {
		alloc_flags |= VM_FLAGS_ANYWHERE;
		user_addr = mach_vm_round_page(user_addr);
	} else {
		if (user_addr != mach_vm_trunc_page(user_addr)) {
		        if (!mapanon)
			        (void)vnode_put(vp);
			error = EINVAL;
			goto bad;
		}
		/*
		 * mmap(MAP_FIXED) will replace any existing mappings in the
		 * specified range, if the new mapping is successful.
		 * If we just deallocate the specified address range here,
		 * another thread might jump in and allocate memory in that
		 * range before we get a chance to establish the new mapping,
		 * and we won't have a chance to restore the old mappings.
		 * So we use VM_FLAGS_OVERWRITE to let Mach VM know that it
		 * has to deallocate the existing mappings and establish the
		 * new ones atomically.
		 */
		alloc_flags |= VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE;
	}

	if (flags & MAP_NOCACHE)
		alloc_flags |= VM_FLAGS_NO_CACHE;

	/*
	 * Lookup/allocate object.
	 */
	if (handle == NULL) {
		pager = NULL;
#ifdef notyet
/* Hmm .. */
#if defined(VM_PROT_READ_IS_EXEC)
		if (prot & VM_PROT_READ)
			prot |= VM_PROT_EXECUTE;
		if (maxprot & VM_PROT_READ)
			maxprot |= VM_PROT_EXECUTE;
#endif
#endif

#if 3777787
		if (prot & (VM_PROT_EXECUTE | VM_PROT_WRITE))
			prot |= VM_PROT_READ;
		if (maxprot & (VM_PROT_EXECUTE | VM_PROT_WRITE))
			maxprot |= VM_PROT_READ;
#endif	/* radar 3777787 */

		result = vm_map_enter_mem_object(user_map,
						 &user_addr, user_size,
						 0, alloc_flags,
						 IPC_PORT_NULL, 0, FALSE,
						 prot, maxprot,
						 (flags & MAP_SHARED) ?
						 VM_INHERIT_SHARE : 
						 VM_INHERIT_DEFAULT);
		if (result != KERN_SUCCESS)
			goto out;
	} else {
		pager = (vm_pager_t)ubc_getpager(vp);
		
		if (pager == NULL) {
			(void)vnode_put(vp);
			error = ENOMEM;
			goto bad;
		}

		/*
		 *  Set credentials:
		 *	FIXME: if we're writing the file we need a way to
		 *      ensure that someone doesn't replace our R/W creds
		 * 	with ones that only work for read.
		 */

		ubc_setthreadcred(vp, p, current_thread());
		docow = FALSE;
		if ((flags & (MAP_ANON|MAP_SHARED)) == 0) {
			docow = TRUE;
		}

#ifdef notyet
/* Hmm .. */
#if defined(VM_PROT_READ_IS_EXEC)
		if (prot & VM_PROT_READ)
			prot |= VM_PROT_EXECUTE;
		if (maxprot & VM_PROT_READ)
			maxprot |= VM_PROT_EXECUTE;
#endif
#endif /* notyet */

#if 3777787
		if (prot & (VM_PROT_EXECUTE | VM_PROT_WRITE))
			prot |= VM_PROT_READ;
		if (maxprot & (VM_PROT_EXECUTE | VM_PROT_WRITE))
			maxprot |= VM_PROT_READ;
#endif	/* radar 3777787 */

		result = vm_map_enter_mem_object(user_map,
						 &user_addr, user_size,
						 0, alloc_flags,
						 (ipc_port_t)pager, file_pos,
						 docow, prot, maxprot, 
						 (flags & MAP_SHARED) ?
						 VM_INHERIT_SHARE : 
						 VM_INHERIT_DEFAULT);

		if (result != KERN_SUCCESS) {
			(void)vnode_put(vp);
			goto out;
		}

		file_prot = prot & (PROT_READ | PROT_WRITE | PROT_EXEC);
		if (docow) {
			/* private mapping: won't write to the file */
			file_prot &= ~PROT_WRITE;
		}
		(void) ubc_map(vp, file_prot);
	}

	if (!mapanon)
		(void)vnode_put(vp);

out:
	switch (result) {
	case KERN_SUCCESS:
		*retval = user_addr + pageoff;
		error = 0;
		break;
	case KERN_INVALID_ADDRESS:
	case KERN_NO_SPACE:
		error =  ENOMEM;
		break;
	case KERN_PROTECTION_FAILURE:
		error =  EACCES;
		break;
	default:
		error =  EINVAL;
		break;
	}
bad:
	if (fpref)
		fp_drop(p, fd, fp, 0);

	KERNEL_DEBUG_CONSTANT((BSDDBG_CODE(DBG_BSD_SC_EXTENDED_INFO, SYS_mmap) | DBG_FUNC_NONE), fd, (uint32_t)(*retval), (uint32_t)user_size, error, 0);
	KERNEL_DEBUG_CONSTANT((BSDDBG_CODE(DBG_BSD_SC_EXTENDED_INFO2, SYS_mmap) | DBG_FUNC_NONE), (uint32_t)(*retval >> 32), (uint32_t)(user_size >> 32),
			      (uint32_t)(file_pos >> 32), (uint32_t)file_pos, 0);

	return(error);
}
Example #26
0
int
mprotect(__unused proc_t p, struct mprotect_args *uap, __unused register_t *retval)
{
	register vm_prot_t prot;
	mach_vm_offset_t	user_addr;
	mach_vm_size_t	user_size;
	kern_return_t	result;
	vm_map_t	user_map;
#if CONFIG_MACF
	int error;
#endif

	AUDIT_ARG(addr, uap->addr);
	AUDIT_ARG(len, uap->len);
	AUDIT_ARG(value, uap->prot);

	user_addr = (mach_vm_offset_t) uap->addr;
	user_size = (mach_vm_size_t) uap->len;
	prot = (vm_prot_t)(uap->prot & VM_PROT_ALL);

	if (user_addr & PAGE_MASK_64) {
		/* UNIX SPEC: user address is not page-aligned, return EINVAL */
		return EINVAL;
	}
		
#ifdef notyet
/* Hmm .. */
#if defined(VM_PROT_READ_IS_EXEC)
	if (prot & VM_PROT_READ)
		prot |= VM_PROT_EXECUTE;
#endif
#endif /* notyet */

#if 3936456
	if (prot & (VM_PROT_EXECUTE | VM_PROT_WRITE))
		prot |= VM_PROT_READ;
#endif	/* 3936456 */

	user_map = current_map();

#if CONFIG_MACF
	/*
	 * The MAC check for mprotect is of limited use for two reasons:
	 * first, without mmap revocation, the caller could have asked for
	 * the max protections initially instead of a reduced set, so a
	 * mprotect check would offer no new security; second, it is not
	 * possible to extract the vnode from the pager object(s) of the
	 * target memory range.  However, the MAC check may still be used
	 * to prevent a process from, e.g., making the stack executable.
	 */
	error = mac_proc_check_mprotect(p, user_addr, user_size, prot);
	if (error)
		return (error);
#endif
	result = mach_vm_protect(user_map, user_addr, user_size,
				 FALSE, prot);
	switch (result) {
	case KERN_SUCCESS:
		return (0);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	case KERN_INVALID_ADDRESS:
		/* UNIX SPEC: for an invalid address range, return ENOMEM */
		return ENOMEM;
	}
	return (EINVAL);
}
Example #27
0
int
ptrace(struct proc *p, struct ptrace_args *uap, register_t *retval)
{
	struct proc *t = current_proc();	/* target process */
	task_t		task;
	thread_t	th_act;
	struct uthread 	*ut;
	int tr_sigexc = 0;
	int error = 0;
	int stopped = 0;

	AUDIT_ARG(cmd, uap->req);
	AUDIT_ARG(pid, uap->pid);
	AUDIT_ARG(addr, uap->addr);
	AUDIT_ARG(value, uap->data);

	if (uap->req == PT_DENY_ATTACH) {
		proc_lock(p);
		if (ISSET(p->p_lflag, P_LTRACED)) {
			proc_unlock(p);
			exit1(p, W_EXITCODE(ENOTSUP, 0), retval);
			/* drop funnel before we return */
			thread_exception_return();
			/* NOTREACHED */
		}
		SET(p->p_lflag, P_LNOATTACH);
		proc_unlock(p);

		return(0);
	}

	if (uap->req == PT_FORCEQUOTA) {
		if (is_suser()) {
			OSBitOrAtomic(P_FORCEQUOTA, (UInt32 *)&t->p_flag);
			return (0);
		} else
			return (EPERM);
	}

	/*
	 *	Intercept and deal with "please trace me" request.
	 */	 
	if (uap->req == PT_TRACE_ME) {
		proc_lock(p);
		SET(p->p_lflag, P_LTRACED);
		/* Non-attached case, our tracer is our parent. */
		p->p_oppid = p->p_ppid;
		proc_unlock(p);
		return(0);
	}
	if (uap->req == PT_SIGEXC) {
		proc_lock(p);
		if (ISSET(p->p_lflag, P_LTRACED)) {
			SET(p->p_lflag, P_LSIGEXC);
			proc_unlock(p);
			return(0);
		} else {
			proc_unlock(p);
			return(EINVAL);
		}
	}

	/*
	 * We do not want ptrace to do anything with the kernel or launchd.
	 */
	if (uap->pid < 2) {
		return(EPERM);
	}

	/*
	 *	Locate victim, and make sure it is traceable.
	 */
	if ((t = proc_find(uap->pid)) == NULL)
		return (ESRCH);

	AUDIT_ARG(process, t);

	task = t->task;
	if (uap->req == PT_ATTACHEXC) {
		uap->req = PT_ATTACH;
		tr_sigexc = 1;
	}
	if (uap->req == PT_ATTACH) {
		int		err;
		
		if (kauth_authorize_process(proc_ucred(p), KAUTH_PROCESS_CANTRACE,
		    t, (uintptr_t)&err, 0, 0) == 0) {
			/* it's OK to attach */
			proc_lock(t);
			SET(t->p_lflag, P_LTRACED);
			if (tr_sigexc) 
				SET(t->p_lflag, P_LSIGEXC);
	
			t->p_oppid = t->p_ppid;
			proc_unlock(t);
			if (t->p_pptr != p)
				proc_reparentlocked(t, p, 1, 0);
	
			proc_lock(t);
			if (get_task_userstop(task) > 0 ) {
				stopped = 1;
			}
			t->p_xstat = 0;
			proc_unlock(t);
			psignal(t, SIGSTOP);
			/*
			 * If the process was stopped, wake up and run through
			 * issignal() again to properly connect to the tracing
			 * process.
			 */
			if (stopped)
				task_resume(task);       
			error = 0;
			goto out;
		}
		else {
			/* not allowed to attach, proper error code returned by kauth_authorize_process */
			if (ISSET(t->p_lflag, P_LNOATTACH)) {
				psignal(p, SIGSEGV);
			}
			
			error = err;
			goto out;
		}
	}

	/*
	 * You can't do what you want to the process if:
	 *	(1) It's not being traced at all,
	 */
	proc_lock(t);
	if (!ISSET(t->p_lflag, P_LTRACED)) {
		proc_unlock(t);
		error = EPERM;
		goto out;
	}

	/*
	 *	(2) it's not being traced by _you_, or
	 */
	if (t->p_pptr != p) {
		proc_unlock(t);
		error = EBUSY;
		goto out;
	}

	/*
	 *	(3) it's not currently stopped.
	 */
	if (t->p_stat != SSTOP) {
		proc_unlock(t);
		error = EBUSY;
		goto out;
	}

	/*
	 *	Mach version of ptrace executes request directly here,
	 *	thus simplifying the interaction of ptrace and signals.
	 */
	/* proc lock is held here */
	switch (uap->req) {

	case PT_DETACH:
		if (t->p_oppid != t->p_ppid) {
			struct proc *pp;

			proc_unlock(t);
			pp = proc_find(t->p_oppid);
			proc_reparentlocked(t, pp ? pp : initproc, 1, 0);
			if (pp != PROC_NULL)
				proc_rele(pp);
			proc_lock(t);
			
		}

		t->p_oppid = 0;
		CLR(t->p_lflag, P_LTRACED);
		CLR(t->p_lflag, P_LSIGEXC);
		proc_unlock(t);
		goto resume;
		
	case PT_KILL:
		/*
		 *	Tell child process to kill itself after it
		 *	is resumed by adding NSIG to p_cursig. [see issig]
		 */
		proc_unlock(t);
		psignal(t, SIGKILL);
		goto resume;

	case PT_STEP:			/* single step the child */
	case PT_CONTINUE:		/* continue the child */
		proc_unlock(t);
		th_act = (thread_t)get_firstthread(task);
		if (th_act == THREAD_NULL) {
			error = EINVAL;
			goto out;
		}

		if (uap->addr != (user_addr_t)1) {
#if defined(ppc)
#define ALIGNED(addr,size)	(((unsigned)(addr)&((size)-1))==0)
			if (!ALIGNED((int)uap->addr, sizeof(int)))
				return (ERESTART);
#undef 	ALIGNED
#endif
			thread_setentrypoint(th_act, uap->addr);
		}

		if ((unsigned)uap->data >= NSIG) {
			error = EINVAL;
			goto out;
		}

		if (uap->data != 0) {
			psignal(t, uap->data);
		}

		if (uap->req == PT_STEP) {
		        /*
			 * set trace bit
			 */
			if (thread_setsinglestep(th_act, 1) != KERN_SUCCESS) {
				error = ENOTSUP;
				goto out;
			}
		} else {
		        /*
			 * clear trace bit if on
			 */
			if (thread_setsinglestep(th_act, 0) != KERN_SUCCESS) {
				error = ENOTSUP;
				goto out;
			}
		}	
	resume:
		proc_lock(t);
		t->p_xstat = uap->data;
		t->p_stat = SRUN;
		if (t->sigwait) {
			wakeup((caddr_t)&(t->sigwait));
			proc_unlock(t);
			if ((t->p_lflag & P_LSIGEXC) == 0) {
				task_resume(task);
			}
		} else
			proc_unlock(t);
			
		break;
		
	case PT_THUPDATE:  {
		proc_unlock(t);
		if ((unsigned)uap->data >= NSIG) {
			error = EINVAL;
			goto out;
		}
		th_act = port_name_to_thread(CAST_DOWN(mach_port_name_t, uap->addr));
		if (th_act == THREAD_NULL)
			return (ESRCH);
		ut = (uthread_t)get_bsdthread_info(th_act);
		if (uap->data)
			ut->uu_siglist |= sigmask(uap->data);
		proc_lock(t);
		t->p_xstat = uap->data;
		t->p_stat = SRUN;
		proc_unlock(t);
		thread_deallocate(th_act);
		error = 0;
		}
		break;
	default:
		proc_unlock(t);
		error = EINVAL;
		goto out;
	}

	error = 0;
out:
	proc_rele(t);
	return(error);
}
Example #28
0
int
ptrace(struct proc *p, struct ptrace_args *uap, int32_t *retval)
{
	struct proc *t = current_proc();	/* target process */
	task_t		task;
	thread_t	th_act;
	struct uthread 	*ut;
	int tr_sigexc = 0;
	int error = 0;
	int stopped = 0;

	AUDIT_ARG(cmd, uap->req);
	AUDIT_ARG(pid, uap->pid);
	AUDIT_ARG(addr, uap->addr);
	AUDIT_ARG(value32, uap->data);

	if (uap->req == PT_DENY_ATTACH) {
		proc_lock(p);
		if (ISSET(p->p_lflag, P_LTRACED)) {
			proc_unlock(p);
			KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_FRCEXIT) | DBG_FUNC_NONE,
					      p->p_pid, W_EXITCODE(ENOTSUP, 0), 4, 0, 0);
			exit1(p, W_EXITCODE(ENOTSUP, 0), retval);

			thread_exception_return();
			/* NOTREACHED */
		}
		SET(p->p_lflag, P_LNOATTACH);
		proc_unlock(p);

		return(0);
	}

	if (uap->req == PT_FORCEQUOTA) {
		if (kauth_cred_issuser(kauth_cred_get())) {
			OSBitOrAtomic(P_FORCEQUOTA, &t->p_flag);
			return (0);
		} else
			return (EPERM);
	}

	/*
	 *	Intercept and deal with "please trace me" request.
	 */	 
	if (uap->req == PT_TRACE_ME) {
retry_trace_me:;
		proc_t pproc = proc_parent(p);
		if (pproc == NULL)
			return (EINVAL);
#if CONFIG_MACF
		/*
		 * NB: Cannot call kauth_authorize_process(..., KAUTH_PROCESS_CANTRACE, ...)
		 *     since that assumes the process being checked is the current process
		 *     when, in this case, it is the current process's parent.
		 *     Most of the other checks in cantrace() don't apply either.
		 */
		if ((error = mac_proc_check_debug(pproc, p)) == 0) {
#endif
			proc_lock(p);
			/* Make sure the process wasn't re-parented. */
			if (p->p_ppid != pproc->p_pid) {
				proc_unlock(p);
				proc_rele(pproc);
				goto retry_trace_me;
			}
			SET(p->p_lflag, P_LTRACED);
			/* Non-attached case, our tracer is our parent. */
			p->p_oppid = p->p_ppid;
			proc_unlock(p);
			/* Child and parent will have to be able to run modified code. */
			cs_allow_invalid(p);
			cs_allow_invalid(pproc);
#if CONFIG_MACF
		}
#endif
		proc_rele(pproc);
		return (error);
	}
	if (uap->req == PT_SIGEXC) {
		proc_lock(p);
		if (ISSET(p->p_lflag, P_LTRACED)) {
			SET(p->p_lflag, P_LSIGEXC);
			proc_unlock(p);
			return(0);
		} else {
			proc_unlock(p);
			return(EINVAL);
		}
	}

	/*
	 * We do not want ptrace to do anything with the kernel or launchd.
	 */
	if (uap->pid < 2) {
		return(EPERM);
	}

	/*
	 *	Locate victim, and make sure it is traceable.
	 */
	if ((t = proc_find(uap->pid)) == NULL)
		return (ESRCH);

	AUDIT_ARG(process, t);

	task = t->task;
	if (uap->req == PT_ATTACHEXC) {
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdeprecated-declarations"
		uap->req = PT_ATTACH;
		tr_sigexc = 1;
	}
	if (uap->req == PT_ATTACH) {
#pragma clang diagnostic pop
		int		err;


		if (kauth_authorize_process(proc_ucred(p), KAUTH_PROCESS_CANTRACE,
		    t, (uintptr_t)&err, 0, 0) == 0) {
			/* it's OK to attach */
			proc_lock(t);
			SET(t->p_lflag, P_LTRACED);
			if (tr_sigexc) 
				SET(t->p_lflag, P_LSIGEXC);
	
			t->p_oppid = t->p_ppid;
			/* Check whether child and parent are allowed to run modified
			 * code (they'll have to) */
			proc_unlock(t);
			cs_allow_invalid(t);
			cs_allow_invalid(p);
			if (t->p_pptr != p)
				proc_reparentlocked(t, p, 1, 0);
	
			proc_lock(t);
			if (get_task_userstop(task) > 0 ) {
				stopped = 1;
			}
			t->p_xstat = 0;
			proc_unlock(t);
			psignal(t, SIGSTOP);
			/*
			 * If the process was stopped, wake up and run through
			 * issignal() again to properly connect to the tracing
			 * process.
			 */
			if (stopped)
				task_resume(task);       
			error = 0;
			goto out;
		}
		else {
			/* not allowed to attach, proper error code returned by kauth_authorize_process */
			if (ISSET(t->p_lflag, P_LNOATTACH)) {
				psignal(p, SIGSEGV);
			}
			
			error = err;
			goto out;
		}
	}

	/*
	 * You can't do what you want to the process if:
	 *	(1) It's not being traced at all,
	 */
	proc_lock(t);
	if (!ISSET(t->p_lflag, P_LTRACED)) {
		proc_unlock(t);
		error = EPERM;
		goto out;
	}

	/*
	 *	(2) it's not being traced by _you_, or
	 */
	if (t->p_pptr != p) {
		proc_unlock(t);
		error = EBUSY;
		goto out;
	}

	/*
	 *	(3) it's not currently stopped.
	 */
	if (t->p_stat != SSTOP) {
		proc_unlock(t);
		error = EBUSY;
		goto out;
	}

	/*
	 *	Mach version of ptrace executes request directly here,
	 *	thus simplifying the interaction of ptrace and signals.
	 */
	/* proc lock is held here */
	switch (uap->req) {

	case PT_DETACH:
		if (t->p_oppid != t->p_ppid) {
			struct proc *pp;

			proc_unlock(t);
			pp = proc_find(t->p_oppid);
			if (pp != PROC_NULL) {
				proc_reparentlocked(t, pp, 1, 0);
				proc_rele(pp);
			} else {
				/* original parent exited while traced */
				proc_list_lock();
				t->p_listflag |= P_LIST_DEADPARENT;
				proc_list_unlock();
				proc_reparentlocked(t, initproc, 1, 0);
			}
			proc_lock(t);
		}

		t->p_oppid = 0;
		CLR(t->p_lflag, P_LTRACED);
		CLR(t->p_lflag, P_LSIGEXC);
		proc_unlock(t);
		goto resume;
		
	case PT_KILL:
		/*
		 *	Tell child process to kill itself after it
		 *	is resumed by adding NSIG to p_cursig. [see issig]
		 */
		proc_unlock(t);
#if CONFIG_MACF
		error = mac_proc_check_signal(p, t, SIGKILL);
		if (0 != error)
			goto resume;
#endif
		psignal(t, SIGKILL);
		goto resume;

	case PT_STEP:			/* single step the child */
	case PT_CONTINUE:		/* continue the child */
		proc_unlock(t);
		th_act = (thread_t)get_firstthread(task);
		if (th_act == THREAD_NULL) {
			error = EINVAL;
			goto out;
		}

		/* force use of Mach SPIs (and task_for_pid security checks) to adjust PC */
		if (uap->addr != (user_addr_t)1) {
			error = ENOTSUP;
			goto out;
		}

		if ((unsigned)uap->data >= NSIG) {
			error = EINVAL;
			goto out;
		}

		if (uap->data != 0) {
#if CONFIG_MACF
			error = mac_proc_check_signal(p, t, uap->data);
			if (0 != error)
				goto out;
#endif
			psignal(t, uap->data);
		}

		if (uap->req == PT_STEP) {
		        /*
			 * set trace bit 
			 * we use sending SIGSTOP as a comparable security check.
			 */
#if CONFIG_MACF
			error = mac_proc_check_signal(p, t, SIGSTOP);
			if (0 != error) {
				goto out;
			}
#endif
			if (thread_setsinglestep(th_act, 1) != KERN_SUCCESS) {
				error = ENOTSUP;
				goto out;
			}
		} else {
		        /*
			 * clear trace bit if on
			 * we use sending SIGCONT as a comparable security check.
			 */
#if CONFIG_MACF
			error = mac_proc_check_signal(p, t, SIGCONT);
			if (0 != error) {
				goto out;
			}
#endif
			if (thread_setsinglestep(th_act, 0) != KERN_SUCCESS) {
				error = ENOTSUP;
				goto out;
			}
		}	
	resume:
		proc_lock(t);
		t->p_xstat = uap->data;
		t->p_stat = SRUN;
		if (t->sigwait) {
			wakeup((caddr_t)&(t->sigwait));
			proc_unlock(t);
			if ((t->p_lflag & P_LSIGEXC) == 0) {
				task_resume(task);
			}
		} else
			proc_unlock(t);
			
		break;
		
	case PT_THUPDATE:  {
		proc_unlock(t);
		if ((unsigned)uap->data >= NSIG) {
			error = EINVAL;
			goto out;
		}
		th_act = port_name_to_thread(CAST_MACH_PORT_TO_NAME(uap->addr));
		if (th_act == THREAD_NULL) {
			error = ESRCH;
			goto out;
		}
		ut = (uthread_t)get_bsdthread_info(th_act);
		if (uap->data)
			ut->uu_siglist |= sigmask(uap->data);
		proc_lock(t);
		t->p_xstat = uap->data;
		t->p_stat = SRUN;
		proc_unlock(t);
		thread_deallocate(th_act);
		error = 0;
		}
		break;
	default:
		proc_unlock(t);
		error = EINVAL;
		goto out;
	}

	error = 0;
out:
	proc_rele(t);
	return(error);
}
Example #29
0
/*
 *	Routine:	task_for_pid
 *	Purpose:
 *		Get the task port for another "process", named by its
 *		process ID on the same host as "target_task".
 *
 *		Only permitted to privileged processes, or processes
 *		with the same user ID.
 *
 *		Note: if pid == 0, an error is returned no matter who is calling.
 *
 * XXX This should be a BSD system call, not a Mach trap!!!
 */
kern_return_t
task_for_pid(
	struct task_for_pid_args *args)
{
	mach_port_name_t	target_tport = args->target_tport;
	int			pid = args->pid;
	user_addr_t		task_addr = args->t;
	proc_t 			p = PROC_NULL;
	task_t			t1 = TASK_NULL;
	mach_port_name_t	tret = MACH_PORT_NULL;
	ipc_port_t		tfpport;
	void * sright;
	int error = 0;

	AUDIT_MACH_SYSCALL_ENTER(AUE_TASKFORPID);
	AUDIT_ARG(pid, pid);
	AUDIT_ARG(mach_port1, target_tport);

	/* Always check if pid == 0 */
	if (pid == 0) {
		(void) copyout((char *)&t1, task_addr, sizeof(mach_port_name_t));
		AUDIT_MACH_SYSCALL_EXIT(KERN_FAILURE);
		return(KERN_FAILURE);
	}

	t1 = port_name_to_task(target_tport);
	if (t1 == TASK_NULL) {
		(void) copyout((char *)&t1, task_addr, sizeof(mach_port_name_t));
		AUDIT_MACH_SYSCALL_EXIT(KERN_FAILURE);
		return(KERN_FAILURE);
	} 


	p = proc_find(pid);
	if (p == PROC_NULL) {
		error = KERN_FAILURE;
		goto tfpout;
	}

#if CONFIG_AUDIT
	AUDIT_ARG(process, p);
#endif

	if (!(task_for_pid_posix_check(p))) {
		error = KERN_FAILURE;
		goto tfpout;
	}

	if (p->task != TASK_NULL) {
		/* If we aren't root and target's task access port is set... */
		if (!kauth_cred_issuser(kauth_cred_get()) &&
			p != current_proc() &&
			(task_get_task_access_port(p->task, &tfpport) == 0) &&
			(tfpport != IPC_PORT_NULL)) {

			if (tfpport == IPC_PORT_DEAD) {
				error = KERN_PROTECTION_FAILURE;
				goto tfpout;
			}

			/* Call up to the task access server */
			error = check_task_access(tfpport, proc_selfpid(), kauth_getgid(), pid);

			if (error != MACH_MSG_SUCCESS) {
				if (error == MACH_RCV_INTERRUPTED)
					error = KERN_ABORTED;
				else
					error = KERN_FAILURE;
				goto tfpout;
			}
		}
#if CONFIG_MACF
		error = mac_proc_check_get_task(kauth_cred_get(), p);
		if (error) {
			error = KERN_FAILURE;
			goto tfpout;
		}
#endif

		/* Grant task port access */
		task_reference(p->task);
		extmod_statistics_incr_task_for_pid(p->task);

		sright = (void *) convert_task_to_port(p->task);
		tret = ipc_port_copyout_send(
				sright, 
				get_task_ipcspace(current_task()));
	} 
	error = KERN_SUCCESS;

tfpout:
	task_deallocate(t1);
	AUDIT_ARG(mach_port2, tret);
	(void) copyout((char *) &tret, task_addr, sizeof(mach_port_name_t));
	if (p != PROC_NULL)
		proc_rele(p);
	AUDIT_MACH_SYSCALL_EXIT(error);
	return(error);
}
Example #30
0
/* ARGSUSED */
int
auditon(proc_t p, struct auditon_args *uap, __unused int32_t *retval)
{
	kauth_cred_t scred;
	int error = 0;
	union auditon_udata udata;
	proc_t tp = PROC_NULL;
	struct auditinfo_addr aia;

	AUDIT_ARG(cmd, uap->cmd);

#if CONFIG_MACF
	error = mac_system_check_auditon(kauth_cred_get(), uap->cmd);
	if (error)
		return (error);
#endif

	if ((uap->length <= 0) || (uap->length >
	    (int)sizeof(union auditon_udata)))
		return (EINVAL);

	memset((void *)&udata, 0, sizeof(udata));

	/*
	 * Some of the GET commands use the arguments too.
	 */
	switch (uap->cmd) {
	case A_SETPOLICY:
	case A_OLDSETPOLICY:
	case A_SETKMASK:
	case A_SETQCTRL:
	case A_OLDSETQCTRL:
	case A_SETSTAT:
	case A_SETUMASK:
	case A_SETSMASK:
	case A_SETCOND:
	case A_OLDSETCOND:
	case A_SETCLASS:
	case A_SETPMASK:
	case A_SETFSIZE:
	case A_SETKAUDIT:
	case A_GETCLASS:
	case A_GETPINFO:
	case A_GETPINFO_ADDR:
	case A_SENDTRIGGER:
	case A_GETSINFO_ADDR:
	case A_GETSFLAGS:
	case A_SETSFLAGS:
		error = copyin(uap->data, (void *)&udata, uap->length);
		if (error)
			return (error);
		AUDIT_ARG(auditon, &udata);
		AUDIT_ARG(len, uap->length);
		break;
	}

	/* Check appropriate privilege. */
	switch (uap->cmd) {
	/*
	 * A_GETSINFO doesn't require privilege, but only the superuser
	 * gets to see the audit masks.
	 */
	case A_GETSINFO_ADDR:
		if ((sizeof(udata.au_kau_info) != uap->length) ||
	   		(audit_session_lookup(udata.au_kau_info.ai_asid,
					      &udata.au_kau_info) != 0))
			error = EINVAL;
		else if (!kauth_cred_issuser(kauth_cred_get())) {
			udata.au_kau_info.ai_mask.am_success = ~0;
			udata.au_kau_info.ai_mask.am_failure = ~0;
		}
		break;
	case A_GETSFLAGS:
	case A_SETSFLAGS:
		/*
		 * Getting one's own audit session flags requires no
		 * privilege.  Setting the flags is subject to access
		 * control implemented in audit_session_setaia().
		 */
		break;
	default:
		error = suser(kauth_cred_get(), &p->p_acflag);
		break;
	}
	if (error)
		return (error);

	/*
	 * XXX Need to implement these commands by accessing the global
	 * values associated with the commands.
	 */
	switch (uap->cmd) {
	case A_OLDGETPOLICY:
	case A_GETPOLICY:
		if (sizeof(udata.au_policy64) == uap->length) {
			mtx_lock(&audit_mtx);
			if (!audit_fail_stop)
				udata.au_policy64 |= AUDIT_CNT;
			if (audit_panic_on_write_fail)
				udata.au_policy64 |= AUDIT_AHLT;
			if (audit_argv)
				udata.au_policy64 |= AUDIT_ARGV;
			if (audit_arge)
				udata.au_policy64 |= AUDIT_ARGE;
			mtx_unlock(&audit_mtx);
			break;
		}
		if (sizeof(udata.au_policy) != uap->length)
			return (EINVAL);
		mtx_lock(&audit_mtx);
		if (!audit_fail_stop)
			udata.au_policy |= AUDIT_CNT;
		if (audit_panic_on_write_fail)
			udata.au_policy |= AUDIT_AHLT;
		if (audit_argv)
			udata.au_policy |= AUDIT_ARGV;
		if (audit_arge)
			udata.au_policy |= AUDIT_ARGE;
		mtx_unlock(&audit_mtx);
		break;

	case A_OLDSETPOLICY:
	case A_SETPOLICY:
		if (sizeof(udata.au_policy64) == uap->length) {
			if (udata.au_policy64 & ~(AUDIT_CNT|AUDIT_AHLT|
				AUDIT_ARGV|AUDIT_ARGE))
				return (EINVAL);
			mtx_lock(&audit_mtx);
			audit_fail_stop = ((udata.au_policy64 & AUDIT_CNT) ==
			    0);
			audit_panic_on_write_fail = (udata.au_policy64 &
			    AUDIT_AHLT);
			audit_argv = (udata.au_policy64 & AUDIT_ARGV);
			audit_arge = (udata.au_policy64 & AUDIT_ARGE);
			mtx_unlock(&audit_mtx);
			break;
		}	
		if ((sizeof(udata.au_policy) != uap->length) ||
		    (udata.au_policy & ~(AUDIT_CNT|AUDIT_AHLT|AUDIT_ARGV|
					 AUDIT_ARGE)))
			return (EINVAL);
		/*
		 * XXX - Need to wake up waiters if the policy relaxes?
		 */
		mtx_lock(&audit_mtx);
		audit_fail_stop = ((udata.au_policy & AUDIT_CNT) == 0);
		audit_panic_on_write_fail = (udata.au_policy & AUDIT_AHLT);
		audit_argv = (udata.au_policy & AUDIT_ARGV);
		audit_arge = (udata.au_policy & AUDIT_ARGE);
		mtx_unlock(&audit_mtx);
		break;

	case A_GETKMASK:
		if (sizeof(udata.au_mask) != uap->length)
			return (EINVAL);
		mtx_lock(&audit_mtx);
		udata.au_mask = audit_nae_mask;
		mtx_unlock(&audit_mtx);
		break;

	case A_SETKMASK:
		if (sizeof(udata.au_mask) != uap->length)
			return (EINVAL);
		mtx_lock(&audit_mtx);
		audit_nae_mask = udata.au_mask;
		AUDIT_CHECK_IF_KEVENTS_MASK(audit_nae_mask);
		mtx_unlock(&audit_mtx);
		break;

	case A_OLDGETQCTRL:
	case A_GETQCTRL:
		if (sizeof(udata.au_qctrl64) == uap->length) {
			mtx_lock(&audit_mtx);
			udata.au_qctrl64.aq64_hiwater =
			    (u_int64_t)audit_qctrl.aq_hiwater;
			udata.au_qctrl64.aq64_lowater =
			    (u_int64_t)audit_qctrl.aq_lowater;
			udata.au_qctrl64.aq64_bufsz =
			    (u_int64_t)audit_qctrl.aq_bufsz;
			udata.au_qctrl64.aq64_delay =
			    (u_int64_t)audit_qctrl.aq_delay;
			udata.au_qctrl64.aq64_minfree = 
			    (int64_t)audit_qctrl.aq_minfree;
			mtx_unlock(&audit_mtx);
			break;
		} 
		if (sizeof(udata.au_qctrl) != uap->length)
			return (EINVAL);
		mtx_lock(&audit_mtx);
		udata.au_qctrl = audit_qctrl;
		mtx_unlock(&audit_mtx);
		break;

	case A_OLDSETQCTRL:
	case A_SETQCTRL:
		if (sizeof(udata.au_qctrl64) == uap->length) {
			 if ((udata.au_qctrl64.aq64_hiwater > AQ_MAXHIGH) ||
			     (udata.au_qctrl64.aq64_lowater >= 
			      udata.au_qctrl64.aq64_hiwater) ||
			     (udata.au_qctrl64.aq64_bufsz > AQ_MAXBUFSZ) ||
			     (udata.au_qctrl64.aq64_minfree < 0) ||
			     (udata.au_qctrl64.aq64_minfree > 100))
				return (EINVAL);
			mtx_lock(&audit_mtx);
			audit_qctrl.aq_hiwater =
			     (int)udata.au_qctrl64.aq64_hiwater;
			audit_qctrl.aq_lowater =
			     (int)udata.au_qctrl64.aq64_lowater;
			audit_qctrl.aq_bufsz =
			     (int)udata.au_qctrl64.aq64_bufsz;
			audit_qctrl.aq_minfree = 
			    (int)udata.au_qctrl64.aq64_minfree;
			audit_qctrl.aq_delay = -1;  /* Not used. */
			mtx_unlock(&audit_mtx);
			break;
		}
		if ((sizeof(udata.au_qctrl) != uap->length) ||
		    (udata.au_qctrl.aq_hiwater > AQ_MAXHIGH) ||
		    (udata.au_qctrl.aq_lowater >= udata.au_qctrl.aq_hiwater) ||
		    (udata.au_qctrl.aq_bufsz > AQ_MAXBUFSZ) ||
		    (udata.au_qctrl.aq_minfree < 0) ||
		    (udata.au_qctrl.aq_minfree > 100))
			return (EINVAL);

		mtx_lock(&audit_mtx);
		audit_qctrl = udata.au_qctrl;
		/* XXX The queue delay value isn't used with the kernel. */
		audit_qctrl.aq_delay = -1;
		mtx_unlock(&audit_mtx);
		break;

	case A_GETCWD:
		return (ENOSYS);

	case A_GETCAR:
		return (ENOSYS);

	case A_GETSTAT:
		return (ENOSYS);

	case A_SETSTAT:
		return (ENOSYS);

	case A_SETUMASK:
		return (ENOSYS);

	case A_SETSMASK:
		return (ENOSYS);

	case A_OLDGETCOND:
	case A_GETCOND:
		if (sizeof(udata.au_cond64) == uap->length) {
			mtx_lock(&audit_mtx);
			if (audit_enabled && !audit_suspended)
				udata.au_cond64 = AUC_AUDITING;
			else
				udata.au_cond64 = AUC_NOAUDIT;
			mtx_unlock(&audit_mtx);
			break;
		}
		if (sizeof(udata.au_cond) != uap->length)
			return (EINVAL);
		mtx_lock(&audit_mtx);
		if (audit_enabled && !audit_suspended)
			udata.au_cond = AUC_AUDITING;
		else
			udata.au_cond = AUC_NOAUDIT;
		mtx_unlock(&audit_mtx);
		break;

	case A_OLDSETCOND:
	case A_SETCOND:
		if (sizeof(udata.au_cond64) == uap->length) {
			mtx_lock(&audit_mtx);
			if (udata.au_cond64 == AUC_NOAUDIT)
				audit_suspended = 1;
			if (udata.au_cond64 == AUC_AUDITING)
				audit_suspended = 0;
			if (udata.au_cond64 == AUC_DISABLED) {
				audit_suspended = 1;
				mtx_unlock(&audit_mtx);
				audit_shutdown();
				break;
			}
			mtx_unlock(&audit_mtx);
			break;
		}
		if (sizeof(udata.au_cond) != uap->length) {
			return (EINVAL);
		}
		mtx_lock(&audit_mtx);
		if (udata.au_cond == AUC_NOAUDIT)
			audit_suspended = 1;
		if (udata.au_cond == AUC_AUDITING)
			audit_suspended = 0;
		if (udata.au_cond == AUC_DISABLED) {
			audit_suspended = 1;
			mtx_unlock(&audit_mtx);
			audit_shutdown();
			break;
		}
		mtx_unlock(&audit_mtx);
		break;

	case A_GETCLASS:
		if (sizeof(udata.au_evclass) != uap->length)
			return (EINVAL);
		udata.au_evclass.ec_class = au_event_class(
		    udata.au_evclass.ec_number);
		break;

	case A_SETCLASS:
		if (sizeof(udata.au_evclass) != uap->length)
			return (EINVAL);
		au_evclassmap_insert(udata.au_evclass.ec_number,
		    udata.au_evclass.ec_class);
		break;

	case A_GETPINFO:
		if ((sizeof(udata.au_aupinfo) != uap->length) ||
		    IS_NOT_VALID_PID(udata.au_aupinfo.ap_pid))
			return (EINVAL);
		if ((tp = proc_find(udata.au_aupinfo.ap_pid)) == NULL)
			return (ESRCH);

		scred = kauth_cred_proc_ref(tp);
		if (scred->cr_audit.as_aia_p->ai_termid.at_type == AU_IPv6) {
			kauth_cred_unref(&scred);
			proc_rele(tp);
			return (EINVAL);
		}
		
		udata.au_aupinfo.ap_auid =
		    scred->cr_audit.as_aia_p->ai_auid;
		udata.au_aupinfo.ap_mask.am_success =
		    scred->cr_audit.as_mask.am_success;
		udata.au_aupinfo.ap_mask.am_failure =
		    scred->cr_audit.as_mask.am_failure;
		udata.au_aupinfo.ap_termid.machine =
		    scred->cr_audit.as_aia_p->ai_termid.at_addr[0];
		udata.au_aupinfo.ap_termid.port =
		    scred->cr_audit.as_aia_p->ai_termid.at_port;
		udata.au_aupinfo.ap_asid =
		    scred->cr_audit.as_aia_p->ai_asid;
		kauth_cred_unref(&scred);
		proc_rele(tp);
		tp = PROC_NULL;
		break;

	case A_SETPMASK:
		if ((sizeof(udata.au_aupinfo) != uap->length) ||
		    IS_NOT_VALID_PID(udata.au_aupinfo.ap_pid))
			return (EINVAL);
		if ((tp = proc_find(udata.au_aupinfo.ap_pid)) == NULL)
			return (ESRCH);
		scred = kauth_cred_proc_ref(tp);
		bcopy(scred->cr_audit.as_aia_p, &aia, sizeof(aia));
		kauth_cred_unref(&scred);
		aia.ai_mask.am_success =
		    udata.au_aupinfo.ap_mask.am_success;
		aia.ai_mask.am_failure =
		    udata.au_aupinfo.ap_mask.am_failure;
		AUDIT_CHECK_IF_KEVENTS_MASK(aia.ai_mask);
		error = audit_session_setaia(tp, &aia);
		proc_rele(tp);
		tp = PROC_NULL;
		if (error)
			return (error);
		break;

	case A_SETFSIZE:
		if ((sizeof(udata.au_fstat) != uap->length) ||
		    ((udata.au_fstat.af_filesz != 0) &&
		     (udata.au_fstat.af_filesz < MIN_AUDIT_FILE_SIZE)))
			return (EINVAL);
		mtx_lock(&audit_mtx);
		audit_fstat.af_filesz = udata.au_fstat.af_filesz;
		mtx_unlock(&audit_mtx);
		break;

	case A_GETFSIZE:
		if (sizeof(udata.au_fstat) != uap->length)
			return (EINVAL);
		mtx_lock(&audit_mtx);
		udata.au_fstat.af_filesz = audit_fstat.af_filesz;
		udata.au_fstat.af_currsz = audit_fstat.af_currsz;
		mtx_unlock(&audit_mtx);
		break;

	case A_GETPINFO_ADDR:
		if ((sizeof(udata.au_aupinfo_addr) != uap->length) ||
		    IS_NOT_VALID_PID(udata.au_aupinfo_addr.ap_pid))
			return (EINVAL);
		if ((tp = proc_find(udata.au_aupinfo_addr.ap_pid)) == NULL)
			return (ESRCH);
		WARN_IF_AINFO_ADDR_CHANGED(uap->length,
		    sizeof(auditpinfo_addr_t), "auditon(A_GETPINFO_ADDR,...)",
		    "auditpinfo_addr_t");
		scred = kauth_cred_proc_ref(tp);
		udata.au_aupinfo_addr.ap_auid =
		    scred->cr_audit.as_aia_p->ai_auid;
		udata.au_aupinfo_addr.ap_asid =
		    scred->cr_audit.as_aia_p->ai_asid;
		udata.au_aupinfo_addr.ap_mask.am_success =
		    scred->cr_audit.as_mask.am_success;
		udata.au_aupinfo_addr.ap_mask.am_failure =
		    scred->cr_audit.as_mask.am_failure;
		bcopy(&scred->cr_audit.as_aia_p->ai_termid, 
		    &udata.au_aupinfo_addr.ap_termid,
		    sizeof(au_tid_addr_t));
		udata.au_aupinfo_addr.ap_flags =
		    scred->cr_audit.as_aia_p->ai_flags;
		kauth_cred_unref(&scred);
		proc_rele(tp);
		tp = PROC_NULL;
		break;

	case A_GETKAUDIT:
		if (sizeof(udata.au_kau_info) != uap->length) 
			return (EINVAL);
		audit_get_kinfo(&udata.au_kau_info);
		break;

	case A_SETKAUDIT:
		if ((sizeof(udata.au_kau_info) != uap->length) ||
		    (udata.au_kau_info.ai_termid.at_type != AU_IPv4 &&
		    udata.au_kau_info.ai_termid.at_type != AU_IPv6))
			return (EINVAL);
		audit_set_kinfo(&udata.au_kau_info);
		break;

	case A_SENDTRIGGER:
		if ((sizeof(udata.au_trigger) != uap->length) || 
		    (udata.au_trigger < AUDIT_TRIGGER_MIN) ||
		    (udata.au_trigger > AUDIT_TRIGGER_MAX))
			return (EINVAL);
		return (audit_send_trigger(udata.au_trigger));

	case A_GETSINFO_ADDR:
		/* Handled above before switch(). */
		break;

	case A_GETSFLAGS:
		if (sizeof(udata.au_flags) != uap->length)
			return (EINVAL);
		bcopy(&(kauth_cred_get()->cr_audit.as_aia_p->ai_flags),
		    &udata.au_flags, sizeof(udata.au_flags));
		break;

	case A_SETSFLAGS:
		if (sizeof(udata.au_flags) != uap->length)
			return (EINVAL);
		bcopy(kauth_cred_get()->cr_audit.as_aia_p, &aia, sizeof(aia));
		aia.ai_flags = udata.au_flags;
		error = audit_session_setaia(p, &aia);
		if (error)
			return (error);
		break;

	default:
		return (EINVAL);
	}

	/*
	 * Copy data back to userspace for the GET commands.
	 */
	switch (uap->cmd) {
	case A_GETPOLICY:
	case A_OLDGETPOLICY:
	case A_GETKMASK:
	case A_GETQCTRL:
	case A_OLDGETQCTRL:
	case A_GETCWD:
	case A_GETCAR:
	case A_GETSTAT:
	case A_GETCOND:
	case A_OLDGETCOND:
	case A_GETCLASS:
	case A_GETPINFO:
	case A_GETFSIZE:
	case A_GETPINFO_ADDR:
	case A_GETKAUDIT:
	case A_GETSINFO_ADDR:
	case A_GETSFLAGS:
		error = copyout((void *)&udata, uap->data, uap->length);
		if (error)
			return (error);
		break;
	}

	return (0);
}