Example #1
/*
 * With a 32-bit kernel and mixed 32/64-bit user tasks, this interface allows the
 * fetching and setting of process-sized size_t and pointer values.
 */
int
sulong(user_addr_t addr, int64_t word)
{

	if (IS_64BIT_PROCESS(current_proc())) {
		return(copyout((void *)&word, addr, sizeof(word)) == 0 ? 0 : -1);
	} else {
		return(suiword(addr, (long)word));
	}
}
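For context, the complementary fetch path follows the same shape; a minimal sketch (assuming the usual fuiword() 32-bit fetch primitive and mirroring the error convention above):

int64_t
fulong(user_addr_t addr)
{
	int64_t longword;

	if (IS_64BIT_PROCESS(current_proc())) {
		/* 64-bit task: copy the full 64-bit value from user space */
		if (copyin(addr, (void *)&longword, sizeof(longword)) != 0)
			return(-1);
		return(longword);
	} else {
		/* 32-bit task: fetch 32 bits and sign-extend */
		return((int64_t)fuiword(addr));
	}
}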
Example #2
int
suulong(user_addr_t addr, uint64_t uword)
{

	if (IS_64BIT_PROCESS(current_proc())) {
		return(copyout((void *)&uword, addr, sizeof(uword)) == 0 ? 0 : -1);
	} else {
		return(suiword(addr, (uint32_t)uword));
	}
}
Example #3
uint64_t
fuulong(user_addr_t addr)
{
	uint64_t ulongword;

	if (IS_64BIT_PROCESS(current_proc())) {
		if (copyin(addr, (void *)&ulongword, sizeof(ulongword)) != 0)
			return(-1ULL);
		return(ulongword);
	} else {
		return((uint64_t)fuiword(addr));
	}
}
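Note that the -1ULL failure sentinel is indistinguishable from a legitimately stored all-ones value. A hypothetical caller (names here are illustrative, not from the source) that must tell the two apart should use copyin() directly, which reports failure out of band; for brevity this sketch always copies 64 bits rather than branching on IS_64BIT_PROCESS() as above:

static int
fetch_user_flags(user_addr_t flagsp, uint64_t *flagsout)
{
	uint64_t flags;

	/* copyin() returns an error code instead of overloading the value */
	if (copyin(flagsp, &flags, sizeof(flags)) != 0)
		return(EFAULT);
	*flagsout = flags;
	return(0);
}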
Example #4
static int
vniocattach_shadow(struct vn_softc *vn, struct vn_ioctl_64 *vniop, 
				   __unused dev_t dev, int in_kernel, proc_t p)
{
	vfs_context_t ctx = vfs_context_current();
	struct nameidata nd;
	int error, flags;
	shadow_map_t *	map;
	off_t file_size;

	flags = FREAD|FWRITE;
	if (in_kernel) {
		NDINIT(&nd, LOOKUP, OP_OPEN, FOLLOW, UIO_SYSSPACE, vniop->vn_file, ctx);
	}
	else {
		NDINIT(&nd, LOOKUP, OP_OPEN, FOLLOW, 
			   (IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32), 
			   vniop->vn_file, ctx);
	}
	/* vn_open gives both long- and short-term references */
	error = vn_open(&nd, flags, 0);
	if (error) {
		/* shadow MUST be writable! */
		return (error);
	}
	if (nd.ni_vp->v_type != VREG 
	    || (error = vnode_size(nd.ni_vp, &file_size, ctx))) {
		(void)vn_close(nd.ni_vp, flags, ctx);
		vnode_put(nd.ni_vp);
		return (error ? error : EINVAL);
	}
	map = shadow_map_create(vn->sc_fsize, file_size,
				0, vn->sc_secsize);
	if (map == NULL) {
		(void)vn_close(nd.ni_vp, flags, ctx);
		vnode_put(nd.ni_vp);
		vn->sc_shadow_vp = NULL;
		return (ENOMEM);
	}
	vn->sc_shadow_vp = nd.ni_vp;
	vn->sc_shadow_vid = vnode_vid(nd.ni_vp);
	vn->sc_shadow_vp->v_flag |= VNOCACHE_DATA;
	vn->sc_shadow_map = map;
	vn->sc_flags &= ~VNF_READONLY; /* we're now read/write */

	/* lose the short-term reference */
	vnode_put(nd.ni_vp);
	return(0);
}
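The NDINIT() segment-flag selection above recurs in several of the later examples; a hypothetical helper (not present in the source) could factor it out:

static enum uio_seg
vn_lookup_seg(proc_t p, int in_kernel)
{
	/* kernel-resident paths live in system space */
	if (in_kernel)
		return(UIO_SYSSPACE);
	/* otherwise pick the user address-space width of the calling task */
	return(IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32);
}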
Example #5
int
coalition_info(proc_t p, struct coalition_info_args *uap, __unused int32_t *retval)
{
	user_addr_t cidp = uap->cid;
	user_addr_t buffer = uap->buffer;
	user_addr_t bufsizep = uap->bufsize;
	user_size_t bufsize;
	uint32_t flavor = uap->flavor;
	int error;
	uint64_t cid;
	coalition_t coal;

	error = copyin(cidp, &cid, sizeof(cid));
	if (error) {
		return error;
	}

	coal = coalition_find_by_id(cid);
	if (coal == COALITION_NULL) {
		return ESRCH;
	}
	/* TODO: priv check? EPERM or ESRCH? */

	if (IS_64BIT_PROCESS(p)) {
		user64_size_t size64;
		error = copyin(bufsizep, &size64, sizeof(size64));
		bufsize = (user_size_t)size64;
	} else {
		user32_size_t size32;
		error = copyin(bufsizep, &size32, sizeof(size32));
		bufsize = (user_size_t)size32;
	}
	if (error) {
		goto bad;
	}

	switch (flavor) {
	case COALITION_INFO_RESOURCE_USAGE:
		error = coalition_info_resource_usage(coal, buffer, bufsize);
		break;
	default:
		error = EINVAL;
	}

bad:
	coalition_release(coal);
	return error;
}
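The bufsize copyin above is a recurring pattern; a hypothetical helper (illustrative only, not part of the source) makes the 32/64-bit handling explicit:

static int
copyin_user_size(proc_t p, user_addr_t sizep, user_size_t *sizeout)
{
	int error;

	if (IS_64BIT_PROCESS(p)) {
		user64_size_t size64;

		/* 64-bit task stores a 64-bit size_t */
		error = copyin(sizep, &size64, sizeof(size64));
		if (error == 0)
			*sizeout = (user_size_t)size64;
	} else {
		user32_size_t size32;

		/* 32-bit task stores a 32-bit size_t; widen it */
		error = copyin(sizep, &size32, sizeof(size32));
		if (error == 0)
			*sizeout = (user_size_t)size32;
	}
	return(error);
}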
Example #6
int
mac_execve_enter(user_addr_t mac_p, struct image_params *imgp)
{
	struct user_mac mac;
	struct label *execlabel;
	char *buffer;
	int error;
	size_t ulen;

	if (mac_p == USER_ADDR_NULL)
		return (0);

	if (IS_64BIT_PROCESS(current_proc())) {
		struct user64_mac mac64;
		error = copyin(mac_p, &mac64, sizeof(mac64));
		mac.m_buflen = mac64.m_buflen;
		mac.m_string = mac64.m_string;
	} else {
		struct user32_mac mac32;
		error = copyin(mac_p, &mac32, sizeof(mac32));
		mac.m_buflen = mac32.m_buflen;
		mac.m_string = mac32.m_string;
	}
	if (error)
		return (error);

	error = mac_check_structmac_consistent(&mac);
	if (error)
		return (error);

	execlabel = mac_cred_label_alloc();
	MALLOC(buffer, char *, mac.m_buflen, M_MACTEMP, M_WAITOK);
	error = copyinstr(CAST_USER_ADDR_T(mac.m_string), buffer, mac.m_buflen, &ulen);
	if (error)
		goto out;
	AUDIT_ARG(mac_string, buffer);

	error = mac_cred_label_internalize(execlabel, buffer);
out:
	if (error) {
		mac_cred_label_free(execlabel);
		execlabel = NULL;
	}
	imgp->ip_execlabelp = execlabel;
	FREE(buffer, M_MACTEMP);
	return (error);
}
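For reference, the user-visible layouts implied by the two copyin() branches above look roughly like this (sketched from the field accesses; the authoritative definitions live in the MAC headers):

struct user64_mac {
	user64_size_t	m_buflen;	/* length of label string buffer */
	user64_addr_t	m_string;	/* 64-bit user pointer to the string */
};

struct user32_mac {
	user32_size_t	m_buflen;	/* length of label string buffer */
	user32_addr_t	m_string;	/* 32-bit user pointer to the string */
};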
Example #7
static int
vniocattach_file(struct vn_softc *vn,
		 struct vn_ioctl_64 *vniop,
		 dev_t dev,
		 int in_kernel,
		 proc_t p)
{
	dev_t	cdev;
	vfs_context_t ctx = vfs_context_current();
	kauth_cred_t cred;
	struct nameidata nd;
	off_t file_size;
	int error, flags;

	flags = FREAD|FWRITE;
	if (in_kernel) {
		NDINIT(&nd, LOOKUP, OP_OPEN, FOLLOW, UIO_SYSSPACE, vniop->vn_file, ctx);
	}
	else {
		NDINIT(&nd, LOOKUP, OP_OPEN, FOLLOW, 
			   (IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32), 
			   vniop->vn_file, ctx);
	}
	/* vn_open gives both long- and short-term references */
	error = vn_open(&nd, flags, 0);
	if (error) {
		if (error != EACCES && error != EPERM && error != EROFS) {
			return (error);
		}
		flags &= ~FWRITE;
		if (in_kernel) {
			NDINIT(&nd, LOOKUP, OP_OPEN, FOLLOW, UIO_SYSSPACE, 
			       vniop->vn_file, ctx);
		}
		else {
			NDINIT(&nd, LOOKUP, OP_OPEN, FOLLOW, 
				   (IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32), 
			       vniop->vn_file, ctx);
		}
		error = vn_open(&nd, flags, 0);
		if (error) {
			return (error);
		}
	}
	if (nd.ni_vp->v_type != VREG) {
		error = EINVAL;
	}
	else {
		error = vnode_size(nd.ni_vp, &file_size, ctx);
	}
	if (error != 0) {
		(void) vn_close(nd.ni_vp, flags, ctx);
		vnode_put(nd.ni_vp);
		return (error);
	}
	cred = kauth_cred_proc_ref(p);
	nd.ni_vp->v_flag |= VNOCACHE_DATA;
	error = setcred(nd.ni_vp, cred);
	if (error) {
		(void)vn_close(nd.ni_vp, flags, ctx);
		vnode_put(nd.ni_vp);
		kauth_cred_unref(&cred);
		return(error);
	}
	vn->sc_secsize = DEV_BSIZE;
	vn->sc_fsize = file_size;
	vn->sc_size = file_size / vn->sc_secsize;
	vn->sc_vp = nd.ni_vp;
	vn->sc_vid = vnode_vid(nd.ni_vp);
	vn->sc_open_flags = flags;
	vn->sc_cred = cred;
	cdev = makedev(vndevice_cdev_major, minor(dev));
	vn->sc_cdev = devfs_make_node(cdev, DEVFS_CHAR,
				      UID_ROOT, GID_OPERATOR, 
				      0600, "rvn%d", 
				      minor(dev));
	vn->sc_flags |= VNF_INITED;
	if (flags == FREAD)
		vn->sc_flags |= VNF_READONLY;
	/* lose the short-term reference */
	vnode_put(nd.ni_vp);
	return(0);
}
Example #8
/*
 *	Routine:	macx_swapoff
 *	Function:
 *		Syscall interface to remove a file from backing store
 */
int
macx_swapoff(
	struct macx_swapoff_args *args)
{
	__unused int	flags = args->flags;
	kern_return_t	kr;
	mach_port_t	backing_store;

	struct vnode		*vp = 0; 
	struct nameidata 	nd, *ndp;
	struct proc		*p =  current_proc();
	int			i;
	int			error;
	boolean_t		funnel_state;
	vfs_context_t ctx = vfs_context_current();

	AUDIT_MACH_SYSCALL_ENTER(AUE_SWAPOFF);

	funnel_state = thread_funnel_set(kernel_flock, TRUE);
	backing_store = NULL;
	ndp = &nd;

	if ((error = suser(kauth_cred_get(), 0)))
		goto swapoff_bailout;

	/*
	 * Get the vnode for the paging area.
	 */
	NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF | AUDITVNPATH1,
	       ((IS_64BIT_PROCESS(p)) ? UIO_USERSPACE64 : UIO_USERSPACE32),
	       (user_addr_t) args->filename, ctx);

	if ((error = namei(ndp)))
		goto swapoff_bailout;
	nameidone(ndp);
	vp = ndp->ni_vp;

	if (vp->v_type != VREG) {
		error = EINVAL;
		goto swapoff_bailout;
	}
#if CONFIG_MACF
	vnode_lock(vp);
	error = mac_system_check_swapoff(vfs_context_ucred(ctx), vp);
	vnode_unlock(vp);
	if (error)
		goto swapoff_bailout;
#endif

	for(i = 0; i < MAX_BACKING_STORE; i++) {
		if(bs_port_table[i].vp == vp) {
			break;
		}
	}
	if (i == MAX_BACKING_STORE) {
		error = EINVAL;
		goto swapoff_bailout;
	}
	backing_store = (mach_port_t)bs_port_table[i].bs;

	kr = default_pager_backing_store_delete(backing_store);
	switch (kr) {
		case KERN_SUCCESS:
			error = 0;
			bs_port_table[i].vp = 0;
			/* This vnode is no longer used for swapfile */
			vnode_lock_spin(vp);
			CLR(vp->v_flag, VSWAP);
			vnode_unlock(vp);

			/* get rid of macx_swapon() "long term" reference */
			vnode_rele(vp);

			break;
		case KERN_FAILURE:
			error = EAGAIN;
			break;
		default:
			error = EAGAIN;
			break;
	}

swapoff_bailout:
	/* get rid of macx_swapoff() namei() reference */
	if (vp)
		vnode_put(vp);

	(void) thread_funnel_set(kernel_flock, FALSE);
	AUDIT_MACH_SYSCALL_EXIT(error);
	return(error);
}
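The bs_port_table consulted above is assumed to have roughly the following shape (a sketch; the real definition, including MAX_BACKING_STORE, lives elsewhere in the kernel):

struct bs_map {
	struct vnode	*vp;	/* swap file vnode; 0 marks a free slot */
	void		*bs;	/* opaque backing-store handle */
};

extern struct bs_map	bs_port_table[MAX_BACKING_STORE];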
Example #9
/*
 *	Routine:	macx_swapon
 *	Function:
 *		Syscall interface to add a file to backing store
 */
int
macx_swapon(
	struct macx_swapon_args *args)
{
	int			size = args->size;
	vnode_t			vp = (vnode_t)NULL; 
	struct nameidata 	nd, *ndp;
	register int		error;
	kern_return_t		kr;
	mach_port_t		backing_store;
	memory_object_default_t	default_pager;
	int			i;
	boolean_t		funnel_state;
	off_t			file_size;
	vfs_context_t		ctx = vfs_context_current();
	struct proc		*p =  current_proc();
	int			dp_cluster_size;


	AUDIT_MACH_SYSCALL_ENTER(AUE_SWAPON);
	AUDIT_ARG(value32, args->priority);

	funnel_state = thread_funnel_set(kernel_flock, TRUE);
	ndp = &nd;

	if ((error = suser(kauth_cred_get(), 0)))
		goto swapon_bailout;

	/*
	 * Get a vnode for the paging area.
	 */
	NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF | AUDITVNPATH1,
	       ((IS_64BIT_PROCESS(p)) ? UIO_USERSPACE64 : UIO_USERSPACE32),
	       (user_addr_t) args->filename, ctx);

	if ((error = namei(ndp)))
		goto swapon_bailout;
	nameidone(ndp);
	vp = ndp->ni_vp;

	if (vp->v_type != VREG) {
		error = EINVAL;
		goto swapon_bailout;
	}

	/* get file size */
	if ((error = vnode_size(vp, &file_size, ctx)) != 0)
		goto swapon_bailout;
#if CONFIG_MACF
	vnode_lock(vp);
	error = mac_system_check_swapon(vfs_context_ucred(ctx), vp);
	vnode_unlock(vp);
	if (error)
		goto swapon_bailout;
#endif

	/* resize to desired size if it's too small */
	if ((file_size < (off_t)size) && ((error = vnode_setsize(vp, (off_t)size, 0, ctx)) != 0))
		goto swapon_bailout;

	if (default_pager_init_flag == 0) {
		start_def_pager(NULL);
		default_pager_init_flag = 1;
	}

	/* add new backing store to list; stop at the first free slot */
	for (i = 0; i < MAX_BACKING_STORE; i++) {
		if (bs_port_table[i].vp == 0)
			break;
	}
	if (i == MAX_BACKING_STORE) {
		error = ENOMEM;
		goto swapon_bailout;
	}

	/* remember the vnode. This vnode has namei() reference */
	bs_port_table[i].vp = vp;
	
	/*
	 * Look to see if we are already paging to this file.
	 */
	/* make certain the copy send of kernel call will work */
	default_pager = MEMORY_OBJECT_DEFAULT_NULL;
	kr = host_default_memory_manager(host_priv_self(), &default_pager, 0);
	if (kr != KERN_SUCCESS) {
		error = EAGAIN;
		bs_port_table[i].vp = 0;
		goto swapon_bailout;
	}

	if (vp->v_mount->mnt_kern_flag & MNTK_SSD) {
		/*
		 * keep the cluster size small since the
		 * seek cost is effectively 0 which means
		 * we don't care much about fragmentation
		 */
		dp_isssd = TRUE;
		dp_cluster_size = 2 * PAGE_SIZE;
	} else {
		/*
		 * use the default cluster size
		 */
		dp_isssd = FALSE;
		dp_cluster_size = 0;
	}
	kr = default_pager_backing_store_create(default_pager, 
					-1, /* default priority */
					dp_cluster_size,
					&backing_store);
	memory_object_default_deallocate(default_pager);

	if (kr != KERN_SUCCESS) {
		error = ENOMEM;
		bs_port_table[i].vp = 0;
		goto swapon_bailout;
	}

	/* Mark this vnode as being used for swapfile */
	vnode_lock_spin(vp);
	SET(vp->v_flag, VSWAP);
	vnode_unlock(vp);

	/*
	 * NOTE: we can supply PAGE_SIZE here instead of an actual record
	 * size or block number because:
	 *	a: we do not support offsets from the beginning of the file
	 *	   (which would allow non page-size/record-modulo offsets), and
	 *	b: paging is always done modulo the page size.
	 */

	kr = default_pager_add_file(backing_store, (vnode_ptr_t) vp,
				PAGE_SIZE, (int)(file_size/PAGE_SIZE));
	if (kr != KERN_SUCCESS) {
		bs_port_table[i].vp = 0;
		if (kr == KERN_INVALID_ARGUMENT)
			error = EINVAL;
		else
			error = ENOMEM;

		/* This vnode is not to be used for swapfile */
		vnode_lock_spin(vp);
		CLR(vp->v_flag, VSWAP);
		vnode_unlock(vp);

		goto swapon_bailout;
	}
	bs_port_table[i].bs = (void *)backing_store;
	error = 0;

	ubc_setthreadcred(vp, p, current_thread());

	/*
	 * take a long term reference on the vnode to keep
	 * vnreclaim() away from this vnode.
	 */
	vnode_ref(vp);

swapon_bailout:
	if (vp) {
		vnode_put(vp);
	}
	(void) thread_funnel_set(kernel_flock, FALSE);
	AUDIT_MACH_SYSCALL_EXIT(error);
	return(error);
}
Example #10
/* ARGSUSED */
int
auditctl(proc_t p, struct auditctl_args *uap, __unused int32_t *retval)
{
	struct nameidata nd;
	kauth_cred_t cred;
	struct vnode *vp;
	int error = 0;

	error = suser(kauth_cred_get(), &p->p_acflag);
	if (error)
		return (error);

	vp = NULL;
	cred = NULL;

	/*
	 * If a path is specified, open the replacement vnode, perform
	 * validity checks, and grab another reference to the current
	 * credential.
	 *
	 * XXX Changes API slightly.  NULL path no longer disables audit but
	 * returns EINVAL.
	 */
	if (uap->path == USER_ADDR_NULL)
		return (EINVAL);

	NDINIT(&nd, LOOKUP, OP_OPEN, FOLLOW | LOCKLEAF | AUDITVNPATH1,
	    (IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 :
	    UIO_USERSPACE32), uap->path, vfs_context_current());
	error = vn_open(&nd, AUDIT_OPEN_FLAGS, 0);
	if (error)
		return (error);
	vp = nd.ni_vp;
#if CONFIG_MACF
	/*
	 * Accessibility of the vnode was determined in vn_open; the
	 * mac_system_check_auditctl should only determine whether that vnode
	 * is appropriate for storing audit data, or that the caller was
	 * permitted to control the auditing system at all.  For example, a
	 * confidentiality policy may want to ensure that audit files are
	 * always high sensitivity.
	 */
	error = mac_system_check_auditctl(kauth_cred_get(), vp);
	if (error) {
		vn_close(vp, AUDIT_CLOSE_FLAGS, vfs_context_current());
		vnode_put(vp);
		return (error);
	}
#endif
	if (vp->v_type != VREG) {
		vn_close(vp, AUDIT_CLOSE_FLAGS, vfs_context_current());
		vnode_put(vp);
		return (EINVAL);
	}
	mtx_lock(&audit_mtx);
	/*
	 * XXXAUDIT: Should audit_suspended actually be cleared by
	 * audit_worker?
	 */
	audit_suspended = 0;
	mtx_unlock(&audit_mtx);

	/*
	 * The following gets unreferenced in audit_rotate_vnode()
	 * after the rotation and it is no longer needed.
	 */
	cred = kauth_cred_get_with_ref();
	audit_rotate_vnode(cred, vp);
	vnode_put(vp);

	return (error);
}
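Every error path after vn_open() above must both close the vnode and drop the iocount; a hypothetical helper (not in the source) would make the pairing harder to miss:

static void
audit_close_and_put(struct vnode *vp)
{
	/* undo vn_open(): close, then release the short-term reference */
	vn_close(vp, AUDIT_CLOSE_FLAGS, vfs_context_current());
	vnode_put(vp);
}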
Example #11
int
msgctl(struct proc *p, struct msgctl_args *uap, int32_t *retval)
{
	int msqid = uap->msqid;
	int cmd = uap->cmd;
	kauth_cred_t cred = kauth_cred_get();
	int rval, eval;
	struct user_msqid_ds msqbuf;
	struct msqid_kernel *msqptr;

	SYSV_MSG_SUBSYS_LOCK();

	if (!msginit(0)) {
		eval =  ENOMEM;
		goto msgctlout;
	}

#ifdef MSG_DEBUG_OK
	printf("call to msgctl(%d, %d, 0x%qx)\n", msqid, cmd, uap->buf);
#endif

	AUDIT_ARG(svipc_cmd, cmd);
	AUDIT_ARG(svipc_id, msqid);
	msqid = IPCID_TO_IX(msqid);

	if (msqid < 0 || msqid >= msginfo.msgmni) {
#ifdef MSG_DEBUG_OK
		printf("msqid (%d) out of range (0<=msqid<%d)\n", msqid,
		    msginfo.msgmni);
#endif
		eval = EINVAL;
		goto msgctlout;
	}

	msqptr = &msqids[msqid];

	if (msqptr->u.msg_qbytes == 0) {
#ifdef MSG_DEBUG_OK
		printf("no such msqid\n");
#endif
		eval = EINVAL;
		goto msgctlout;
	}
	if (msqptr->u.msg_perm._seq != IPCID_TO_SEQ(uap->msqid)) {
#ifdef MSG_DEBUG_OK
		printf("wrong sequence number\n");
#endif
		eval = EINVAL;
		goto msgctlout;
	}
#if CONFIG_MACF
	eval = mac_sysvmsq_check_msqctl(kauth_cred_get(), msqptr, cmd);
	if (eval) 
		goto msgctlout;
#endif

	eval = 0;
	rval = 0;

	switch (cmd) {

	case IPC_RMID:
	{
		struct msg *msghdr;
		if ((eval = ipcperm(cred, &msqptr->u.msg_perm, IPC_M)))
			goto msgctlout;
#if CONFIG_MACF
		/*
		 * Check that the thread has MAC access permissions to
		 * individual msghdrs.  Note: We need to do this in a
		 * separate loop because the actual loop alters the
		 * msq/msghdr info as it progresses, and there is no going
		 * back if half the way through we discover that the
		 * thread cannot free a certain msghdr.  The msq will get
		 * into an inconsistent state.
		 */
		for (msghdr = msqptr->u.msg_first; msghdr != NULL;
		    msghdr = msghdr->msg_next) {
			eval = mac_sysvmsq_check_msgrmid(kauth_cred_get(), msghdr);
			if (eval) 
				goto msgctlout;
		}
#endif
		/* Free the message headers */
		msghdr = msqptr->u.msg_first;
		while (msghdr != NULL) {
			struct msg *msghdr_tmp;

			/* Free the segments of each message */
			msqptr->u.msg_cbytes -= msghdr->msg_ts;
			msqptr->u.msg_qnum--;
			msghdr_tmp = msghdr;
			msghdr = msghdr->msg_next;
			msg_freehdr(msghdr_tmp);
		}

		if (msqptr->u.msg_cbytes != 0)
			panic("msg_cbytes is messed up");
		if (msqptr->u.msg_qnum != 0)
			panic("msg_qnum is messed up");

		msqptr->u.msg_qbytes = 0;	/* Mark it as free */
#if CONFIG_MACF
		mac_sysvmsq_label_recycle(msqptr);
#endif

		wakeup((caddr_t)msqptr);
	}

		break;

	case IPC_SET:
		if ((eval = ipcperm(cred, &msqptr->u.msg_perm, IPC_M)))
			goto msgctlout;

		SYSV_MSG_SUBSYS_UNLOCK();

		if (IS_64BIT_PROCESS(p)) {
			struct user64_msqid_ds tmpds;
			eval = copyin(uap->buf, &tmpds, sizeof(tmpds));

			msqid_ds_user64tokernel(&tmpds, &msqbuf);
		} else {
			struct user32_msqid_ds tmpds;

			eval = copyin(uap->buf, &tmpds, sizeof(tmpds));

			msqid_ds_user32tokernel(&tmpds, &msqbuf);
		}
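		/*
		 * The subsystem lock was dropped above and has not been
		 * retaken yet, so on error return directly rather than
		 * jumping to msgctlout (which unlocks).
		 */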
		if (eval)
			return(eval);

		SYSV_MSG_SUBSYS_LOCK();

		if (msqbuf.msg_qbytes > msqptr->u.msg_qbytes) {
			eval = suser(cred, &p->p_acflag);
			if (eval)
				goto msgctlout;
		}


		/* compare (msglen_t) value against restrict (int) value */
		if (msqbuf.msg_qbytes > (user_msglen_t)msginfo.msgmnb) {
#ifdef MSG_DEBUG_OK
			printf("can't increase msg_qbytes beyond %d (truncating)\n",
			    msginfo.msgmnb);
#endif
			msqbuf.msg_qbytes = msginfo.msgmnb;	/* silently restrict qbytes to system limit */
		}
		if (msqbuf.msg_qbytes == 0) {
#ifdef MSG_DEBUG_OK
			printf("can't reduce msg_qbytes to 0\n");
#endif
			eval = EINVAL;
			goto msgctlout;
		}
		msqptr->u.msg_perm.uid = msqbuf.msg_perm.uid;	/* change the owner */
		msqptr->u.msg_perm.gid = msqbuf.msg_perm.gid;	/* change the owner */
		msqptr->u.msg_perm.mode = (msqptr->u.msg_perm.mode & ~0777) |
		    (msqbuf.msg_perm.mode & 0777);
		msqptr->u.msg_qbytes = msqbuf.msg_qbytes;
		msqptr->u.msg_ctime = sysv_msgtime();
		break;

	case IPC_STAT:
		if ((eval = ipcperm(cred, &msqptr->u.msg_perm, IPC_R))) {
#ifdef MSG_DEBUG_OK
			printf("requester doesn't have read access\n");
#endif
			goto msgctlout;
		}

		SYSV_MSG_SUBSYS_UNLOCK();
		if (IS_64BIT_PROCESS(p)) {
			struct user64_msqid_ds msqid_ds64 = {};
			msqid_ds_kerneltouser64(&msqptr->u, &msqid_ds64);
			eval = copyout(&msqid_ds64, uap->buf, sizeof(msqid_ds64));
		} else {
			struct user32_msqid_ds msqid_ds32 = {};
			msqid_ds_kerneltouser32(&msqptr->u, &msqid_ds32);
			eval = copyout(&msqid_ds32, uap->buf, sizeof(msqid_ds32));
		}
		SYSV_MSG_SUBSYS_LOCK();
		break;

	default:
#ifdef MSG_DEBUG_OK
		printf("invalid command %d\n", cmd);
#endif
		eval = EINVAL;
		goto msgctlout;
	}

	if (eval == 0)
		*retval = rval;
msgctlout:
	SYSV_MSG_SUBSYS_UNLOCK();
	return(eval);
}
Example #12
void
sendsig(struct proc *p, user_addr_t catcher, int sig, int mask, __unused u_long code)
{
	kern_return_t kretn;
	struct mcontext mctx;
	user_addr_t p_mctx = USER_ADDR_NULL;		/* mcontext dest. */
	struct mcontext64 mctx64;
	user_addr_t p_mctx64 = USER_ADDR_NULL;		/* mcontext dest. */
	struct user_ucontext64 uctx;
	user_addr_t p_uctx;		/* user stack addr to copy ucontext to */
	user_siginfo_t sinfo;
	user_addr_t p_sinfo;		/* user stack addr to copy siginfo to */
	struct sigacts *ps = p->p_sigacts;
	int oonstack;
	user_addr_t sp;
	mach_msg_type_number_t state_count;
	thread_t th_act;
	struct uthread *ut;
	int infostyle = UC_TRAD;
	int dualcontext =0;
	user_addr_t trampact;
	int vec_used = 0;
	int stack_size = 0;
	void * tstate;
	int flavor;
	int ctx32 = 1;

	th_act = current_thread();
	ut = get_bsdthread_info(th_act);

	/*
	 * XXX We conditionalize type passed here based on SA_SIGINFO, but
	 * XXX we always send up all the information, regardless; perhaps
	 * XXX this should not be conditionalized?  Defer making this change
	 * XXX now, due to possible tools impact.
	 */
	if (p->p_sigacts->ps_siginfo & sigmask(sig)) {
		/*
		 * If SA_SIGINFO is set, then we must provide the user
		 * process both a siginfo_t and a context argument.  We call
		 * this "FLAVORED", as opposed to "TRADITIONAL", which doesn't
		 * expect a context.  "DUAL" is a type of "FLAVORED".
		 */
		if (is_64signalregset()) {
			/*
			 * If this is a 64 bit CPU, we must include a 64 bit
			 * context in the data we pass to user space; we may
			 * or may not also include a 32 bit context at the
			 * same time, for non-leaf functions.
			 *
			 * The user may also explicitly choose to not receive
			 * a 32 bit context, at their option; we only allow
			 * this to happen on 64 bit processors, for obvious
			 * reasons.
			 */
			if (IS_64BIT_PROCESS(p) ||
			    (p->p_sigacts->ps_64regset & sigmask(sig))) {
				 /*
				  * For a 64 bit process, there is no 32 bit
				  * context.
				  */
				ctx32 = 0;
				infostyle = UC_FLAVOR64;
			} else {
				/*
				 * For a 32 bit process on a 64 bit CPU, we
				 * may have 64 bit leaf functions, so we need
				 * both contexts.
				 */
				dualcontext = 1;
				infostyle = UC_DUAL;
			}
		} else {
			/*
			 * If this is a 32 bit CPU, then we only have a 32 bit
			 * context to contend with.
			 */
			infostyle = UC_FLAVOR;
		}
	} else {
		/*
		 * If SA_SIGINFO is not set, then we have a traditional style
		 * call which does not need additional context passed.  The
		 * default is 32 bit traditional.
		 *
		 * XXX The second check is redundant on PPC32; keep it anyway.
		 */
		if (is_64signalregset() || IS_64BIT_PROCESS(p)) {
			/*
			 * However, if this is a 64 bit CPU, we need to change
			 * this to 64 bit traditional, and drop the 32 bit
			 * context.
			 */
			ctx32 = 0;
			infostyle = UC_TRAD64;
		}
	}
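	/*
	 * Summary of the selection above:
	 *
	 *   SA_SIGINFO set,   64-bit regset, 64-bit proc or ps_64regset: UC_FLAVOR64
	 *   SA_SIGINFO set,   64-bit regset, 32-bit proc:                UC_DUAL
	 *   SA_SIGINFO set,   32-bit regset:                             UC_FLAVOR
	 *   SA_SIGINFO clear, 64-bit regset or 64-bit proc:              UC_TRAD64
	 *   SA_SIGINFO clear, otherwise:                                 UC_TRAD
	 *
	 * If the thread's vector state is live, each style is later
	 * bumped to its _VEC variant ("infostyle += 5" below).
	 */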

	proc_unlock(p);

	/* I need this for SIGINFO anyway */
	flavor = PPC_THREAD_STATE;
	tstate = (void *)&mctx.ss;
	state_count = PPC_THREAD_STATE_COUNT;
	if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count)  != KERN_SUCCESS)
		goto bad;

	if ((ctx32 == 0) || dualcontext) {
		flavor = PPC_THREAD_STATE64;
		tstate = (void *)&mctx64.ss;
		state_count = PPC_THREAD_STATE64_COUNT;
		if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS)
			goto bad;
	}

	if ((ctx32 == 1) || dualcontext) {
		flavor = PPC_EXCEPTION_STATE;
		tstate = (void *)&mctx.es;
		state_count = PPC_EXCEPTION_STATE_COUNT;
		if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS)
			goto bad;
	}

	if ((ctx32 == 0) || dualcontext) {
		flavor = PPC_EXCEPTION_STATE64;
		tstate = (void *)&mctx64.es;
		state_count = PPC_EXCEPTION_STATE64_COUNT;
		if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS)
			goto bad;
	}

	if ((ctx32 == 1) || dualcontext) {
		flavor = PPC_FLOAT_STATE;
		tstate = (void *)&mctx.fs;
		state_count = PPC_FLOAT_STATE_COUNT;
		if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS)
			goto bad;
	}

	if ((ctx32 == 0) || dualcontext) {
		flavor = PPC_FLOAT_STATE;
		tstate = (void *)&mctx64.fs;
		state_count = PPC_FLOAT_STATE_COUNT;
		if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS)
			goto bad;
	}

	if (find_user_vec_curr()) {
		vec_used = 1;

		if ((ctx32 == 1) || dualcontext) {
			flavor = PPC_VECTOR_STATE;
			tstate = (void *)&mctx.vs;
			state_count = PPC_VECTOR_STATE_COUNT;
			if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS)
				goto bad;
			infostyle += 5;
		}

		if ((ctx32 == 0) || dualcontext) {
			flavor = PPC_VECTOR_STATE;
			tstate = (void *)&mctx64.vs;
			state_count = PPC_VECTOR_STATE_COUNT;
			if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS)
				goto bad;
			infostyle += 5;
		}
	}

	trampact = ps->ps_trampact[sig];
	oonstack = ut->uu_sigstk.ss_flags & SA_ONSTACK;

	/* figure out where our new stack lives */
	if ((ut->uu_flag & UT_ALTSTACK) && !oonstack &&
		(ps->ps_sigonstack & sigmask(sig))) {
		sp = ut->uu_sigstk.ss_sp;
		sp += ut->uu_sigstk.ss_size;
		stack_size = ut->uu_sigstk.ss_size;
		ut->uu_sigstk.ss_flags |= SA_ONSTACK;
	}
	else {
		if (ctx32 == 0)
			sp = mctx64.ss.r1;
		else
			sp = CAST_USER_ADDR_T(mctx.ss.r1);
	}

	
	/* put siginfo on top */

	/* preserve RED ZONE area */
	if (IS_64BIT_PROCESS(p))
		sp = TRUNC_DOWN64(sp, C_64_REDZONE_LEN, C_64_STK_ALIGN);
	else
		sp = TRUNC_DOWN32(sp, C_32_REDZONE_LEN, C_32_STK_ALIGN);

	/* next are the saved registers */
	if ((ctx32 == 0) || dualcontext) {
		sp -= sizeof(struct mcontext64);
		p_mctx64 = sp;
	}
	if ((ctx32 == 1) || dualcontext) {
		sp -= sizeof(struct mcontext);
		p_mctx = sp;
	}
        
	if (IS_64BIT_PROCESS(p)) {
		/* context goes first on stack */
		sp -= sizeof(struct user_ucontext64);
		p_uctx = sp;

		/* this is where siginfo goes on stack */
		sp -= sizeof(user_siginfo_t);
		p_sinfo = sp;
		
		sp = TRUNC_DOWN64(sp, C_64_PARAMSAVE_LEN+C_64_LINKAGE_LEN, C_64_STK_ALIGN);
	} else {
		/*
		 * struct ucontext and struct ucontext64 are identical in
		 * size and content; the only difference is the internal
		 * pointer type for the last element, which makes no
		 * difference for the copyout().
		 */

		/* context goes first on stack */
		sp -= sizeof(struct ucontext64);
		p_uctx = sp;

		/* this is where siginfo goes on stack */
		sp -= sizeof(siginfo_t);
		p_sinfo = sp;

		sp = TRUNC_DOWN32(sp, C_32_PARAMSAVE_LEN+C_32_LINKAGE_LEN, C_32_STK_ALIGN);
	}

	uctx.uc_onstack = oonstack;
	uctx.uc_sigmask = mask;
	uctx.uc_stack.ss_sp = sp;
	uctx.uc_stack.ss_size = stack_size;
	if (oonstack)
		uctx.uc_stack.ss_flags |= SS_ONSTACK;
		
	uctx.uc_link = 0;
	if (ctx32 == 0)
		uctx.uc_mcsize = (size_t)((PPC_EXCEPTION_STATE64_COUNT + PPC_THREAD_STATE64_COUNT + PPC_FLOAT_STATE_COUNT) * sizeof(int));
	else
		uctx.uc_mcsize = (size_t)((PPC_EXCEPTION_STATE_COUNT + PPC_THREAD_STATE_COUNT + PPC_FLOAT_STATE_COUNT) * sizeof(int));
	
	if (vec_used) 
		uctx.uc_mcsize += (size_t)(PPC_VECTOR_STATE_COUNT * sizeof(int));

	if (ctx32 == 0)
		uctx.uc_mcontext64 = p_mctx64;
	else
		uctx.uc_mcontext64 = p_mctx;

	/* setup siginfo */
	bzero((caddr_t)&sinfo, sizeof(user_siginfo_t));
	sinfo.si_signo = sig;
	if (ctx32 == 0) {
		sinfo.si_addr = mctx64.ss.srr0;
		sinfo.pad[0] = mctx64.ss.r1;
	} else {
		sinfo.si_addr = CAST_USER_ADDR_T(mctx.ss.srr0);
		sinfo.pad[0] = CAST_USER_ADDR_T(mctx.ss.r1);
	}

	switch (sig) {
		case SIGILL:
			/*
			 * If it's 64 bit and not a dual context, mctx will
			 * contain uninitialized data, so we have to use
			 * mctx64 here.
			 */
			if(ctx32 == 0) {
				if (mctx64.ss.srr1 & (1 << (31 - SRR1_PRG_ILL_INS_BIT)))
					sinfo.si_code = ILL_ILLOPC;
				else if (mctx64.ss.srr1 & (1 << (31 - SRR1_PRG_PRV_INS_BIT)))
					sinfo.si_code = ILL_PRVOPC;
				else if (mctx64.ss.srr1 & (1 << (31 - SRR1_PRG_TRAP_BIT)))
					sinfo.si_code = ILL_ILLTRP;
				else
					sinfo.si_code = ILL_NOOP;
			} else {
				if (mctx.ss.srr1 & (1 << (31 - SRR1_PRG_ILL_INS_BIT)))
					sinfo.si_code = ILL_ILLOPC;
				else if (mctx.ss.srr1 & (1 << (31 - SRR1_PRG_PRV_INS_BIT)))
					sinfo.si_code = ILL_PRVOPC;
				else if (mctx.ss.srr1 & (1 << (31 - SRR1_PRG_TRAP_BIT)))
					sinfo.si_code = ILL_ILLTRP;
				else
					sinfo.si_code = ILL_NOOP;
			}
			break;
		case SIGFPE:
#define FPSCR_VX	2
#define FPSCR_OX	3
#define FPSCR_UX	4
#define FPSCR_ZX	5
#define FPSCR_XX	6
			/*
			 * If it's 64 bit and not a dual context, mctx will
			 * contain uninitialized data, so we have to use
			 * mctx64 here.
			 */
			if(ctx32 == 0) {
				if (mctx64.fs.fpscr & (1 << (31 - FPSCR_VX)))
					sinfo.si_code = FPE_FLTINV;
				else if (mctx64.fs.fpscr & (1 << (31 - FPSCR_OX)))
					sinfo.si_code = FPE_FLTOVF;
				else if (mctx64.fs.fpscr & (1 << (31 - FPSCR_UX)))
					sinfo.si_code = FPE_FLTUND;
				else if (mctx64.fs.fpscr & (1 << (31 - FPSCR_ZX)))
					sinfo.si_code = FPE_FLTDIV;
				else if (mctx64.fs.fpscr & (1 << (31 - FPSCR_XX)))
					sinfo.si_code = FPE_FLTRES;
				else
					sinfo.si_code = FPE_NOOP;
			} else {
				if (mctx.fs.fpscr & (1 << (31 - FPSCR_VX)))
					sinfo.si_code = FPE_FLTINV;
				else if (mctx.fs.fpscr & (1 << (31 - FPSCR_OX)))
					sinfo.si_code = FPE_FLTOVF;
				else if (mctx.fs.fpscr & (1 << (31 - FPSCR_UX)))
					sinfo.si_code = FPE_FLTUND;
				else if (mctx.fs.fpscr & (1 << (31 - FPSCR_ZX)))
					sinfo.si_code = FPE_FLTDIV;
				else if (mctx.fs.fpscr & (1 << (31 - FPSCR_XX)))
					sinfo.si_code = FPE_FLTRES;
				else
					sinfo.si_code = FPE_NOOP;
			}
			break;

		case SIGBUS:
			if (ctx32 == 0) {
				sinfo.si_addr = mctx64.es.dar;
			} else {
				sinfo.si_addr = CAST_USER_ADDR_T(mctx.es.dar);
			}
			/* on ppc we generate only if EXC_PPC_UNALIGNED */
			sinfo.si_code = BUS_ADRALN;
			break;

		case SIGSEGV:
			/*
			 * If it's 64 bit and not a dual context, mctx will
			 * contain uninitialized data, so we have to use
			 * mctx64 here.
			 */
			if (ctx32 == 0) {
				sinfo.si_addr = mctx64.es.dar;
				/* First check in srr1 and then in dsisr */
				if (mctx64.ss.srr1 & (1 << (31 - DSISR_PROT_BIT)))
					sinfo.si_code = SEGV_ACCERR;
				else if (mctx64.es.dsisr & (1 << (31 - DSISR_PROT_BIT)))
					sinfo.si_code = SEGV_ACCERR;
				else
					sinfo.si_code = SEGV_MAPERR;
			} else {
				sinfo.si_addr = CAST_USER_ADDR_T(mctx.es.dar);
				/* First check in srr1 and then in dsisr */
				if (mctx.ss.srr1 & (1 << (31 - DSISR_PROT_BIT)))
					sinfo.si_code = SEGV_ACCERR;
				else if (mctx.es.dsisr & (1 << (31 - DSISR_PROT_BIT)))
					sinfo.si_code = SEGV_ACCERR;
				else
					sinfo.si_code = SEGV_MAPERR;
			}
			break;
		default:
		{
			int status_and_exitcode;

			/*
			 * All other signals need to fill out a minimum set of
			 * information for the siginfo structure passed into
			 * the signal handler, if SA_SIGINFO was specified.
			 *
			 * p->si_status actually contains both the status and
			 * the exit code; we save it off in its own variable
			 * for later breakdown.
			 */
			proc_lock(p);
			sinfo.si_pid = p->si_pid;
			p->si_pid = 0;
			status_and_exitcode = p->si_status;
			p->si_status = 0;
			sinfo.si_uid = p->si_uid;
			p->si_uid = 0;
			sinfo.si_code = p->si_code;
			p->si_code = 0;
			proc_unlock(p);
			if (sinfo.si_code == CLD_EXITED) {
				if (WIFEXITED(status_and_exitcode)) 
					sinfo.si_code = CLD_EXITED;
				else if (WIFSIGNALED(status_and_exitcode)) {
					if (WCOREDUMP(status_and_exitcode)) {
						sinfo.si_code = CLD_DUMPED;
						status_and_exitcode = W_EXITCODE(status_and_exitcode,status_and_exitcode);
					} else {
						sinfo.si_code = CLD_KILLED;
						status_and_exitcode = W_EXITCODE(status_and_exitcode,status_and_exitcode);
					}
				}
			}
			/*
			 * The recorded status contains the exit code and the
			 * signal information, but the information to be passed
			 * in the siginfo to the handler is supposed to only
			 * contain the status, so we have to shift it out.
			 */
			sinfo.si_status = WEXITSTATUS(status_and_exitcode);
			break;
		}
	}


	/* copy info out to user space */
	if (IS_64BIT_PROCESS(p)) {

		/* XXX truncates catcher address to uintptr_t */
		DTRACE_PROC3(signal__handle, int, sig, siginfo_t *, &sinfo,
			void (*)(void), CAST_DOWN(sig_t, catcher));

		if (copyout(&uctx, p_uctx, sizeof(struct user_ucontext64)))
			goto bad;
		if (copyout(&sinfo, p_sinfo, sizeof(user_siginfo_t)))
			goto bad;
	} else {