Code example #1
File: vfs_mountroot.c Project: ChristosKa/freebsd
static void
set_rootvnode(void)
{
	struct proc *p;

	if (VFS_ROOT(TAILQ_FIRST(&mountlist), LK_EXCLUSIVE, &rootvnode))
		panic("Cannot find root vnode");

	VOP_UNLOCK(rootvnode, 0);

	p = curthread->td_proc;
	FILEDESC_XLOCK(p->p_fd);

	if (p->p_fd->fd_cdir != NULL)
		vrele(p->p_fd->fd_cdir);
	p->p_fd->fd_cdir = rootvnode;
	VREF(rootvnode);

	if (p->p_fd->fd_rdir != NULL)
		vrele(p->p_fd->fd_rdir);
	p->p_fd->fd_rdir = rootvnode;
	VREF(rootvnode);

	FILEDESC_XUNLOCK(p->p_fd);
}
Code example #2
static void *
kobj_open_file_vnode(const char *file)
{
	struct thread *td = curthread;
	struct filedesc *fd;
	struct nameidata nd;
	int error, flags, vfslocked;

	fd = td->td_proc->p_fd;
	FILEDESC_XLOCK(fd);
	if (fd->fd_rdir == NULL) {
		fd->fd_rdir = rootvnode;
		vref(fd->fd_rdir);
	}
	if (fd->fd_cdir == NULL) {
		fd->fd_cdir = rootvnode;
		vref(fd->fd_cdir);
	}
	FILEDESC_XUNLOCK(fd);

	flags = FREAD | O_NOFOLLOW;
	NDINIT(&nd, LOOKUP, MPSAFE, UIO_SYSSPACE, file, td);
	error = vn_open_cred(&nd, &flags, 0, 0, curthread->td_ucred, NULL);
	if (error != 0)
		return (NULL);
	vfslocked = NDHASGIANT(&nd);
	NDFREE(&nd, NDF_ONLY_PNBUF);
	/* We just unlock so we hold a reference. */
	VOP_UNLOCK(nd.ni_vp, 0);
	VFS_UNLOCK_GIANT(vfslocked);
	return (nd.ni_vp);
}
Code example #3
File: sys_capability.c Project: 2trill2spill/freebsd
int
kern_cap_rights_limit(struct thread *td, int fd, cap_rights_t *rights)
{
	struct filedesc *fdp;
	int error;

	fdp = td->td_proc->p_fd;
	FILEDESC_XLOCK(fdp);
	if (fget_locked(fdp, fd) == NULL) {
		FILEDESC_XUNLOCK(fdp);
		return (EBADF);
	}
	error = _cap_check(cap_rights(fdp, fd), rights, CAPFAIL_INCREASE);
	if (error == 0) {
		fdp->fd_ofiles[fd].fde_rights = *rights;
		if (!cap_rights_is_set(rights, CAP_IOCTL)) {
			free(fdp->fd_ofiles[fd].fde_ioctls, M_FILECAPS);
			fdp->fd_ofiles[fd].fde_ioctls = NULL;
			fdp->fd_ofiles[fd].fde_nioctls = 0;
		}
		if (!cap_rights_is_set(rights, CAP_FCNTL))
			fdp->fd_ofiles[fd].fde_fcntls = 0;
	}
	FILEDESC_XUNLOCK(fdp);
	return (error);
}
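For context, a minimal userland sketch (not taken from the project above) of how this kernel path is normally reached through the cap_rights_limit(2) wrapper; the file path is purely illustrative:

#include <sys/capsicum.h>
#include <fcntl.h>
#include <err.h>

int
main(void)
{
	cap_rights_t rights;
	int fd;

	fd = open("/etc/motd", O_RDONLY);	/* illustrative path */
	if (fd < 0)
		err(1, "open");

	/*
	 * Restrict the descriptor to read and seek.  Any later attempt to
	 * broaden the rights fails with ENOTCAPABLE, which is what the
	 * CAPFAIL_INCREASE check above enforces.
	 */
	cap_rights_init(&rights, CAP_READ, CAP_SEEK);
	if (cap_rights_limit(fd, &rights) < 0)
		err(1, "cap_rights_limit");

	return (0);
}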
Code example #4
File: sys_capability.c Project: 2trill2spill/freebsd
int
sys_cap_fcntls_limit(struct thread *td, struct cap_fcntls_limit_args *uap)
{
	struct filedesc *fdp;
	uint32_t fcntlrights;
	int fd;

	fd = uap->fd;
	fcntlrights = uap->fcntlrights;

	AUDIT_ARG_FD(fd);
	AUDIT_ARG_FCNTL_RIGHTS(fcntlrights);

	if ((fcntlrights & ~CAP_FCNTL_ALL) != 0)
		return (EINVAL);

	fdp = td->td_proc->p_fd;
	FILEDESC_XLOCK(fdp);

	if (fget_locked(fdp, fd) == NULL) {
		FILEDESC_XUNLOCK(fdp);
		return (EBADF);
	}

	if ((fcntlrights & ~fdp->fd_ofiles[fd].fde_fcntls) != 0) {
		FILEDESC_XUNLOCK(fdp);
		return (ENOTCAPABLE);
	}

	fdp->fd_ofiles[fd].fde_fcntls = fcntlrights;
	FILEDESC_XUNLOCK(fdp);

	return (0);
}
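A hedged userland counterpart to the syscall above: cap_fcntls_limit(2) takes a bitmask of CAP_FCNTL_* flags that the kernel checks against fde_fcntls; the path below is only a placeholder:

#include <sys/capsicum.h>
#include <fcntl.h>
#include <err.h>

int
main(void)
{
	int fd;

	fd = open("/tmp/example", O_RDWR | O_CREAT, 0600);	/* placeholder path */
	if (fd < 0)
		err(1, "open");

	/* Allow only F_GETFL and F_SETFL through fcntl(2) on this descriptor. */
	if (cap_fcntls_limit(fd, CAP_FCNTL_GETFL | CAP_FCNTL_SETFL) < 0)
		err(1, "cap_fcntls_limit");

	return (0);
}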
Code example #5
File: sys_capability.c Project: ChaosJohn/freebsd
int
kern_cap_ioctls_limit(struct thread *td, int fd, u_long *cmds, size_t ncmds)
{
	struct filedesc *fdp;
	u_long *ocmds;
	int error;

	AUDIT_ARG_FD(fd);

	fdp = td->td_proc->p_fd;
	FILEDESC_XLOCK(fdp);

	if (fget_locked(fdp, fd) == NULL) {
		error = EBADF;
		goto out;
	}

	error = cap_ioctl_limit_check(fdp, fd, cmds, ncmds);
	if (error != 0)
		goto out;

	ocmds = fdp->fd_ofiles[fd].fde_ioctls;
	fdp->fd_ofiles[fd].fde_ioctls = cmds;
	fdp->fd_ofiles[fd].fde_nioctls = ncmds;

	cmds = ocmds;
	error = 0;
out:
	FILEDESC_XUNLOCK(fdp);
	free(cmds, M_FILECAPS);
	return (error);
}
Code example #6
/*
 * For backward compatibility.
 */
int
sys_cap_new(struct thread *td, struct cap_new_args *uap)
{
	struct filedesc *fdp;
	cap_rights_t rights;
	register_t newfd;
	int error, fd;

	fd = uap->fd;
	rights = uap->rights;

	AUDIT_ARG_FD(fd);
	AUDIT_ARG_RIGHTS(rights);

	if ((rights & ~CAP_ALL) != 0)
		return (EINVAL);

	fdp = td->td_proc->p_fd;
	FILEDESC_SLOCK(fdp);
	if (fget_locked(fdp, fd) == NULL) {
		FILEDESC_SUNLOCK(fdp);
		return (EBADF);
	}
	error = _cap_check(cap_rights(fdp, fd), rights, CAPFAIL_INCREASE);
	FILEDESC_SUNLOCK(fdp);
	if (error != 0)
		return (error);

	error = do_dup(td, 0, fd, 0, &newfd);
	if (error != 0)
		return (error);

	FILEDESC_XLOCK(fdp);
	/*
	 * We don't really care about the race between checking the
	 * capability rights for the source descriptor and now.  If the
	 * rights were OK at that earlier point, the process already held
	 * this descriptor with those rights, so we are not increasing them
	 * in any security sense; the process could simply have called
	 * cap_new(2) a bit earlier to get the same effect.
	 */
	fdp->fd_ofiles[newfd].fde_rights = rights;
	if ((rights & CAP_IOCTL) == 0) {
		free(fdp->fd_ofiles[newfd].fde_ioctls, M_TEMP);
		fdp->fd_ofiles[newfd].fde_ioctls = NULL;
		fdp->fd_ofiles[newfd].fde_nioctls = 0;
	}
	if ((rights & CAP_FCNTL) == 0)
		fdp->fd_ofiles[newfd].fde_fcntls = 0;
	FILEDESC_XUNLOCK(fdp);

	td->td_retval[0] = newfd;

	return (0);
}
Code example #7
int
sys_cap_ioctls_limit(struct thread *td, struct cap_ioctls_limit_args *uap)
{
	struct filedesc *fdp;
	u_long *cmds, *ocmds;
	size_t ncmds;
	int error, fd;

	fd = uap->fd;
	ncmds = uap->ncmds;

	AUDIT_ARG_FD(fd);

	if (ncmds > 256)	/* XXX: Is 256 sane? */
		return (EINVAL);

	if (ncmds == 0) {
		cmds = NULL;
	} else {
		cmds = malloc(sizeof(cmds[0]) * ncmds, M_TEMP, M_WAITOK);
		error = copyin(uap->cmds, cmds, sizeof(cmds[0]) * ncmds);
		if (error != 0) {
			free(cmds, M_TEMP);
			return (error);
		}
	}

	fdp = td->td_proc->p_fd;
	FILEDESC_XLOCK(fdp);

	if (fget_locked(fdp, fd) == NULL) {
		error = EBADF;
		goto out;
	}

	error = cap_ioctl_limit_check(fdp, fd, cmds, ncmds);
	if (error != 0)
		goto out;

	ocmds = fdp->fd_ofiles[fd].fde_ioctls;
	fdp->fd_ofiles[fd].fde_ioctls = cmds;
	fdp->fd_ofiles[fd].fde_nioctls = ncmds;

	cmds = ocmds;
	error = 0;
out:
	FILEDESC_XUNLOCK(fdp);
	free(cmds, M_TEMP);
	return (error);
}
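For comparison, a sketch of the matching userland call, cap_ioctls_limit(2), which supplies the cmds array that the kernel copies in above; FIONREAD and FIONBIO are used only as illustrative commands:

#include <sys/ioctl.h>
#include <sys/capsicum.h>
#include <fcntl.h>
#include <err.h>

int
main(void)
{
	const unsigned long cmds[] = { FIONREAD, FIONBIO };
	int fd;

	fd = open("/dev/null", O_RDWR);
	if (fd < 0)
		err(1, "open");

	/* After this call only the listed ioctl commands work on fd. */
	if (cap_ioctls_limit(fd, cmds, sizeof(cmds) / sizeof(cmds[0])) < 0)
		err(1, "cap_ioctls_limit");

	return (0);
}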
Code example #8
int
kern_cap_ioctls_limit(struct thread *td, int fd, u_long *cmds, size_t ncmds)
{
	struct filedesc *fdp;
	struct filedescent *fdep;
	u_long *ocmds;
	int error;

	AUDIT_ARG_FD(fd);

	if (ncmds > IOCTLS_MAX_COUNT) {
		error = EINVAL;
		goto out_free;
	}

	fdp = td->td_proc->p_fd;
	FILEDESC_XLOCK(fdp);

	fdep = fdeget_locked(fdp, fd);
	if (fdep == NULL) {
		error = EBADF;
		goto out;
	}

	error = cap_ioctl_limit_check(fdep, cmds, ncmds);
	if (error != 0)
		goto out;

	ocmds = fdep->fde_ioctls;
	fdep->fde_ioctls = cmds;
	fdep->fde_nioctls = ncmds;

	cmds = ocmds;
	error = 0;
out:
	FILEDESC_XUNLOCK(fdp);
out_free:
	free(cmds, M_FILECAPS);
	return (error);
}
Code example #9
/*
 * System call to limit rights of the given capability.
 */
int
sys_cap_rights_limit(struct thread *td, struct cap_rights_limit_args *uap)
{
	struct filedesc *fdp;
	cap_rights_t rights;
	int error, fd;

	fd = uap->fd;
	rights = uap->rights;

	AUDIT_ARG_FD(fd);
	AUDIT_ARG_RIGHTS(rights);

	if ((rights & ~CAP_ALL) != 0)
		return (EINVAL);

	fdp = td->td_proc->p_fd;
	FILEDESC_XLOCK(fdp);
	if (fget_locked(fdp, fd) == NULL) {
		FILEDESC_XUNLOCK(fdp);
		return (EBADF);
	}
	error = _cap_check(cap_rights(fdp, fd), rights, CAPFAIL_INCREASE);
	if (error == 0) {
		fdp->fd_ofiles[fd].fde_rights = rights;
		if ((rights & CAP_IOCTL) == 0) {
			free(fdp->fd_ofiles[fd].fde_ioctls, M_TEMP);
			fdp->fd_ofiles[fd].fde_ioctls = NULL;
			fdp->fd_ofiles[fd].fde_nioctls = 0;
		}
		if ((rights & CAP_FCNTL) == 0)
			fdp->fd_ofiles[fd].fde_fcntls = 0;
	}
	FILEDESC_XUNLOCK(fdp);
	return (error);
}
Code example #10
File: uipc_shm.c Project: BillTheBest/libuinet
/* System calls. */
int
sys_shm_open(struct thread *td, struct shm_open_args *uap)
{
	struct filedesc *fdp;
	struct shmfd *shmfd;
	struct file *fp;
	char *path;
	Fnv32_t fnv;
	mode_t cmode;
	int fd, error;

#ifdef CAPABILITY_MODE
	/*
	 * shm_open(2) is only allowed for anonymous objects.
	 */
	if (IN_CAPABILITY_MODE(td) && (uap->path != SHM_ANON))
		return (ECAPMODE);
#endif

	if ((uap->flags & O_ACCMODE) != O_RDONLY &&
	    (uap->flags & O_ACCMODE) != O_RDWR)
		return (EINVAL);

	if ((uap->flags & ~(O_ACCMODE | O_CREAT | O_EXCL | O_TRUNC)) != 0)
		return (EINVAL);

	fdp = td->td_proc->p_fd;
	cmode = (uap->mode & ~fdp->fd_cmask) & ACCESSPERMS;

	error = falloc(td, &fp, &fd, 0);
	if (error)
		return (error);

	/* A SHM_ANON path pointer creates an anonymous object. */
	if (uap->path == SHM_ANON) {
		/* A read-only anonymous object is pointless. */
		if ((uap->flags & O_ACCMODE) == O_RDONLY) {
			fdclose(fdp, fp, fd, td);
			fdrop(fp, td);
			return (EINVAL);
		}
		shmfd = shm_alloc(td->td_ucred, cmode);
	} else {
		path = malloc(MAXPATHLEN, M_SHMFD, M_WAITOK);
		error = copyinstr(uap->path, path, MAXPATHLEN, NULL);

		/* Require paths to start with a '/' character. */
		if (error == 0 && path[0] != '/')
			error = EINVAL;
		if (error) {
			fdclose(fdp, fp, fd, td);
			fdrop(fp, td);
			free(path, M_SHMFD);
			return (error);
		}

		fnv = fnv_32_str(path, FNV1_32_INIT);
		sx_xlock(&shm_dict_lock);
		shmfd = shm_lookup(path, fnv);
		if (shmfd == NULL) {
			/* Object does not yet exist, create it if requested. */
			if (uap->flags & O_CREAT) {
#ifdef MAC
				error = mac_posixshm_check_create(td->td_ucred,
				    path);
				if (error == 0) {
#endif
					shmfd = shm_alloc(td->td_ucred, cmode);
					shm_insert(path, fnv, shmfd);
#ifdef MAC
				}
#endif
			} else {
				free(path, M_SHMFD);
				error = ENOENT;
			}
		} else {
			/*
			 * Object already exists, obtain a new
			 * reference if requested and permitted.
			 */
			free(path, M_SHMFD);
			if ((uap->flags & (O_CREAT | O_EXCL)) ==
			    (O_CREAT | O_EXCL))
				error = EEXIST;
			else {
#ifdef MAC
				error = mac_posixshm_check_open(td->td_ucred,
				    shmfd, FFLAGS(uap->flags & O_ACCMODE));
				if (error == 0)
#endif
				error = shm_access(shmfd, td->td_ucred,
				    FFLAGS(uap->flags & O_ACCMODE));
			}

			/*
			 * Truncate the file back to zero length if
			 * O_TRUNC was specified and the object was
			 * opened with read/write.
			 */
			if (error == 0 &&
			    (uap->flags & (O_ACCMODE | O_TRUNC)) ==
			    (O_RDWR | O_TRUNC)) {
#ifdef MAC
				error = mac_posixshm_check_truncate(
					td->td_ucred, fp->f_cred, shmfd);
				if (error == 0)
#endif
					shm_dotruncate(shmfd, 0);
			}
			if (error == 0)
				shm_hold(shmfd);
		}
		sx_xunlock(&shm_dict_lock);

		if (error) {
			fdclose(fdp, fp, fd, td);
			fdrop(fp, td);
			return (error);
		}
	}

	finit(fp, FFLAGS(uap->flags & O_ACCMODE), DTYPE_SHM, shmfd, &shm_ops);

	FILEDESC_XLOCK(fdp);
	if (fdp->fd_ofiles[fd] == fp)
		fdp->fd_ofileflags[fd] |= UF_EXCLOSE;
	FILEDESC_XUNLOCK(fdp);
	td->td_retval[0] = fd;
	fdrop(fp, td);

	return (0);
}
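For reference, a minimal userland sketch of the SHM_ANON branch handled above: an anonymous object must be opened read/write, then sized with ftruncate(2) and mapped with mmap(2):

#include <sys/mman.h>
#include <fcntl.h>
#include <unistd.h>
#include <string.h>
#include <err.h>

int
main(void)
{
	void *p;
	int fd;

	/* Anonymous POSIX shared memory; O_RDONLY here would return EINVAL. */
	fd = shm_open(SHM_ANON, O_RDWR, 0600);
	if (fd < 0)
		err(1, "shm_open");
	if (ftruncate(fd, 4096) < 0)
		err(1, "ftruncate");

	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		err(1, "mmap");
	memcpy(p, "hello", 6);

	return (0);
}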
Code example #11
File: kern_fork.c Project: mulichao/freebsd
static void
do_fork(struct thread *td, struct fork_req *fr, struct proc *p2, struct thread *td2,
    struct vmspace *vm2, struct file *fp_procdesc)
{
	struct proc *p1, *pptr;
	int trypid;
	struct filedesc *fd;
	struct filedesc_to_leader *fdtol;
	struct sigacts *newsigacts;

	sx_assert(&proctree_lock, SX_SLOCKED);
	sx_assert(&allproc_lock, SX_XLOCKED);

	p1 = td->td_proc;

	trypid = fork_findpid(fr->fr_flags);

	sx_sunlock(&proctree_lock);

	p2->p_state = PRS_NEW;		/* protect against others */
	p2->p_pid = trypid;
	AUDIT_ARG_PID(p2->p_pid);
	LIST_INSERT_HEAD(&allproc, p2, p_list);
	allproc_gen++;
	LIST_INSERT_HEAD(PIDHASH(p2->p_pid), p2, p_hash);
	tidhash_add(td2);
	PROC_LOCK(p2);
	PROC_LOCK(p1);

	sx_xunlock(&allproc_lock);

	bcopy(&p1->p_startcopy, &p2->p_startcopy,
	    __rangeof(struct proc, p_startcopy, p_endcopy));
	pargs_hold(p2->p_args);

	PROC_UNLOCK(p1);

	bzero(&p2->p_startzero,
	    __rangeof(struct proc, p_startzero, p_endzero));

	/* Tell the prison that we exist. */
	prison_proc_hold(p2->p_ucred->cr_prison);

	PROC_UNLOCK(p2);

	/*
	 * Malloc things while we don't hold any locks.
	 */
	if (fr->fr_flags & RFSIGSHARE)
		newsigacts = NULL;
	else
		newsigacts = sigacts_alloc();

	/*
	 * Copy filedesc.
	 */
	if (fr->fr_flags & RFCFDG) {
		fd = fdinit(p1->p_fd, false);
		fdtol = NULL;
	} else if (fr->fr_flags & RFFDG) {
		fd = fdcopy(p1->p_fd);
		fdtol = NULL;
	} else {
		fd = fdshare(p1->p_fd);
		if (p1->p_fdtol == NULL)
			p1->p_fdtol = filedesc_to_leader_alloc(NULL, NULL,
			    p1->p_leader);
		if ((fr->fr_flags & RFTHREAD) != 0) {
			/*
			 * Shared file descriptor table, and shared
			 * process leaders.
			 */
			fdtol = p1->p_fdtol;
			FILEDESC_XLOCK(p1->p_fd);
			fdtol->fdl_refcount++;
			FILEDESC_XUNLOCK(p1->p_fd);
		} else {
			/* 
			 * Shared file descriptor table, and different
			 * process leaders.
			 */
			fdtol = filedesc_to_leader_alloc(p1->p_fdtol,
			    p1->p_fd, p2);
		}
	}
	/*
	 * Make a proc table entry for the new process.
	 * Start by zeroing the section of proc that is zero-initialized,
	 * then copy the section that is copied directly from the parent.
	 */

	PROC_LOCK(p2);
	PROC_LOCK(p1);

	bzero(&td2->td_startzero,
	    __rangeof(struct thread, td_startzero, td_endzero));

	bcopy(&td->td_startcopy, &td2->td_startcopy,
	    __rangeof(struct thread, td_startcopy, td_endcopy));

	bcopy(&p2->p_comm, &td2->td_name, sizeof(td2->td_name));
	td2->td_sigstk = td->td_sigstk;
	td2->td_flags = TDF_INMEM;
	td2->td_lend_user_pri = PRI_MAX;

#ifdef VIMAGE
	td2->td_vnet = NULL;
	td2->td_vnet_lpush = NULL;
#endif

	/*
	 * Allow the scheduler to initialize the child.
	 */
	thread_lock(td);
	sched_fork(td, td2);
	thread_unlock(td);

	/*
	 * Duplicate sub-structures as needed.
	 * Increase reference counts on shared objects.
	 */
	p2->p_flag = P_INMEM;
	p2->p_flag2 = p1->p_flag2 & (P2_NOTRACE | P2_NOTRACE_EXEC | P2_TRAPCAP);
	p2->p_swtick = ticks;
	if (p1->p_flag & P_PROFIL)
		startprofclock(p2);

	/*
	 * Whilst the proc lock is held, copy the VM domain data out
	 * using the VM domain method.
	 */
	vm_domain_policy_init(&p2->p_vm_dom_policy);
	vm_domain_policy_localcopy(&p2->p_vm_dom_policy,
	    &p1->p_vm_dom_policy);

	if (fr->fr_flags & RFSIGSHARE) {
		p2->p_sigacts = sigacts_hold(p1->p_sigacts);
	} else {
		sigacts_copy(newsigacts, p1->p_sigacts);
		p2->p_sigacts = newsigacts;
	}

	if (fr->fr_flags & RFTSIGZMB)
	        p2->p_sigparent = RFTSIGNUM(fr->fr_flags);
	else if (fr->fr_flags & RFLINUXTHPN)
	        p2->p_sigparent = SIGUSR1;
	else
	        p2->p_sigparent = SIGCHLD;

	p2->p_textvp = p1->p_textvp;
	p2->p_fd = fd;
	p2->p_fdtol = fdtol;

	if (p1->p_flag2 & P2_INHERIT_PROTECTED) {
		p2->p_flag |= P_PROTECTED;
		p2->p_flag2 |= P2_INHERIT_PROTECTED;
	}

	/*
	 * p_limit is copy-on-write.  Bump its refcount.
	 */
	lim_fork(p1, p2);

	thread_cow_get_proc(td2, p2);

	pstats_fork(p1->p_stats, p2->p_stats);

	PROC_UNLOCK(p1);
	PROC_UNLOCK(p2);

	/* Bump references to the text vnode (for procfs). */
	if (p2->p_textvp)
		vrefact(p2->p_textvp);

	/*
	 * Set up linkage for kernel based threading.
	 */
	if ((fr->fr_flags & RFTHREAD) != 0) {
		mtx_lock(&ppeers_lock);
		p2->p_peers = p1->p_peers;
		p1->p_peers = p2;
		p2->p_leader = p1->p_leader;
		mtx_unlock(&ppeers_lock);
		PROC_LOCK(p1->p_leader);
		if ((p1->p_leader->p_flag & P_WEXIT) != 0) {
			PROC_UNLOCK(p1->p_leader);
			/*
			 * The task leader is exiting, so process p1 is
			 * going to be killed shortly.  Since p1 obviously
			 * isn't dead yet, we know that the leader is either
			 * sending SIGKILL's to all the processes in this
			 * task or is sleeping waiting for all the peers to
			 * exit.  We let p1 complete the fork, but we need
			 * to go ahead and kill the new process p2 since
			 * the task leader may not get a chance to send
			 * SIGKILL to it.  We leave it on the list so that
			 * the task leader will wait for this new process
			 * to commit suicide.
			 */
			PROC_LOCK(p2);
			kern_psignal(p2, SIGKILL);
			PROC_UNLOCK(p2);
		} else
			PROC_UNLOCK(p1->p_leader);
	} else {
		p2->p_peers = NULL;
		p2->p_leader = p2;
	}

	sx_xlock(&proctree_lock);
	PGRP_LOCK(p1->p_pgrp);
	PROC_LOCK(p2);
	PROC_LOCK(p1);

	/*
	 * Preserve some more flags in subprocess.  P_PROFIL has already
	 * been preserved.
	 */
	p2->p_flag |= p1->p_flag & P_SUGID;
	td2->td_pflags |= (td->td_pflags & TDP_ALTSTACK) | TDP_FORKING;
	SESS_LOCK(p1->p_session);
	if (p1->p_session->s_ttyvp != NULL && p1->p_flag & P_CONTROLT)
		p2->p_flag |= P_CONTROLT;
	SESS_UNLOCK(p1->p_session);
	if (fr->fr_flags & RFPPWAIT)
		p2->p_flag |= P_PPWAIT;

	p2->p_pgrp = p1->p_pgrp;
	LIST_INSERT_AFTER(p1, p2, p_pglist);
	PGRP_UNLOCK(p1->p_pgrp);
	LIST_INIT(&p2->p_children);
	LIST_INIT(&p2->p_orphans);

	callout_init_mtx(&p2->p_itcallout, &p2->p_mtx, 0);

	/*
	 * If PF_FORK is set, the child process inherits the
	 * procfs ioctl flags from its parent.
	 */
	if (p1->p_pfsflags & PF_FORK) {
		p2->p_stops = p1->p_stops;
		p2->p_pfsflags = p1->p_pfsflags;
	}

	/*
	 * This begins the section where we must prevent the parent
	 * from being swapped.
	 */
	_PHOLD(p1);
	PROC_UNLOCK(p1);

	/*
	 * Attach the new process to its parent.
	 *
	 * If RFNOWAIT is set, the newly created process becomes a child
	 * of init.  This effectively disassociates the child from the
	 * parent.
	 */
	if ((fr->fr_flags & RFNOWAIT) != 0) {
		pptr = p1->p_reaper;
		p2->p_reaper = pptr;
	} else {
		p2->p_reaper = (p1->p_treeflag & P_TREE_REAPER) != 0 ?
		    p1 : p1->p_reaper;
		pptr = p1;
	}
	p2->p_pptr = pptr;
	LIST_INSERT_HEAD(&pptr->p_children, p2, p_sibling);
	LIST_INIT(&p2->p_reaplist);
	LIST_INSERT_HEAD(&p2->p_reaper->p_reaplist, p2, p_reapsibling);
	if (p2->p_reaper == p1)
		p2->p_reapsubtree = p2->p_pid;
	sx_xunlock(&proctree_lock);

	/* Inform accounting that we have forked. */
	p2->p_acflag = AFORK;
	PROC_UNLOCK(p2);

#ifdef KTRACE
	ktrprocfork(p1, p2);
#endif

	/*
	 * Finish creating the child process.  It will return via a different
	 * execution path later.  (ie: directly into user mode)
	 */
	vm_forkproc(td, p2, td2, vm2, fr->fr_flags);

	if (fr->fr_flags == (RFFDG | RFPROC)) {
		VM_CNT_INC(v_forks);
		VM_CNT_ADD(v_forkpages, p2->p_vmspace->vm_dsize +
		    p2->p_vmspace->vm_ssize);
	} else if (fr->fr_flags == (RFFDG | RFPROC | RFPPWAIT | RFMEM)) {
		VM_CNT_INC(v_vforks);
		VM_CNT_ADD(v_vforkpages, p2->p_vmspace->vm_dsize +
		    p2->p_vmspace->vm_ssize);
	} else if (p1 == &proc0) {
		VM_CNT_INC(v_kthreads);
		VM_CNT_ADD(v_kthreadpages, p2->p_vmspace->vm_dsize +
		    p2->p_vmspace->vm_ssize);
	} else {
		VM_CNT_INC(v_rforks);
		VM_CNT_ADD(v_rforkpages, p2->p_vmspace->vm_dsize +
		    p2->p_vmspace->vm_ssize);
	}

	/*
	 * Associate the process descriptor with the process before anything
	 * can happen that might cause that process to need the descriptor.
	 * However, don't do this until after fork(2) can no longer fail.
	 */
	if (fr->fr_flags & RFPROCDESC)
		procdesc_new(p2, fr->fr_pd_flags);

	/*
	 * Both processes are set up, now check if any loadable modules want
	 * to adjust anything.
	 */
	EVENTHANDLER_INVOKE(process_fork, p1, p2, fr->fr_flags);

	/*
	 * Set the child start time and mark the process as being complete.
	 */
	PROC_LOCK(p2);
	PROC_LOCK(p1);
	microuptime(&p2->p_stats->p_start);
	PROC_SLOCK(p2);
	p2->p_state = PRS_NORMAL;
	PROC_SUNLOCK(p2);

#ifdef KDTRACE_HOOKS
	/*
	 * Tell the DTrace fasttrap provider about the new process so that any
	 * tracepoints inherited from the parent can be removed. We have to do
	 * this only after p_state is PRS_NORMAL since the fasttrap module will
	 * use pfind() later on.
	 */
	if ((fr->fr_flags & RFMEM) == 0 && dtrace_fasttrap_fork)
		dtrace_fasttrap_fork(p1, p2);
#endif
	/*
	 * Hold the process so that it cannot exit after we make it runnable,
	 * but before we wait for the debugger.
	 */
	_PHOLD(p2);
	if (p1->p_ptevents & PTRACE_FORK) {
		/*
		 * Arrange for debugger to receive the fork event.
		 *
		 * We can report PL_FLAG_FORKED regardless of the
		 * P_FOLLOWFORK setting, but it does not make sense for a
		 * runaway child.
		 */
		td->td_dbgflags |= TDB_FORK;
		td->td_dbg_forked = p2->p_pid;
		td2->td_dbgflags |= TDB_STOPATFORK;
	}
	if (fr->fr_flags & RFPPWAIT) {
		td->td_pflags |= TDP_RFPPWAIT;
		td->td_rfppwait_p = p2;
		td->td_dbgflags |= TDB_VFORK;
	}
	PROC_UNLOCK(p2);

	/*
	 * Now can be swapped.
	 */
	_PRELE(p1);
	PROC_UNLOCK(p1);

	/*
	 * Tell any interested parties about the new process.
	 */
	knote_fork(p1->p_klist, p2->p_pid);
	SDT_PROBE3(proc, , , create, p2, p1, fr->fr_flags);

	if (fr->fr_flags & RFPROCDESC) {
		procdesc_finit(p2->p_procdesc, fp_procdesc);
		fdrop(fp_procdesc, td);
	}

	if ((fr->fr_flags & RFSTOPPED) == 0) {
		/*
		 * If RFSTOPPED not requested, make child runnable and
		 * add to run queue.
		 */
		thread_lock(td2);
		TD_SET_CAN_RUN(td2);
		sched_add(td2, SRQ_BORING);
		thread_unlock(td2);
		if (fr->fr_pidp != NULL)
			*fr->fr_pidp = p2->p_pid;
	} else {
		*fr->fr_procp = p2;
	}

	PROC_LOCK(p2);
	/*
	 * Wait until debugger is attached to child.
	 */
	while (td2->td_proc == p2 && (td2->td_dbgflags & TDB_STOPATFORK) != 0)
		cv_wait(&p2->p_dbgwait, &p2->p_mtx);
	_PRELE(p2);
	racct_proc_fork_done(p2);
	PROC_UNLOCK(p2);
}
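As a rough userland illustration of the descriptor-table branches above (fdinit for RFCFDG, fdcopy for RFFDG, fdshare otherwise), rfork(2) lets the caller choose between them; this is only a sketch, unrelated to the project cited:

#include <sys/wait.h>
#include <unistd.h>
#include <err.h>

int
main(void)
{
	pid_t pid;

	/*
	 * RFPROC creates a new process; RFFDG gives it a copy of the
	 * descriptor table.  Omitting RFFDG shares the table, and RFCFDG
	 * starts the child with a fresh one.
	 */
	pid = rfork(RFPROC | RFFDG);
	if (pid < 0)
		err(1, "rfork");
	if (pid == 0)
		_exit(0);		/* child */
	waitpid(pid, NULL, 0);
	return (0);
}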
Code example #12
File: sys_capability.c Project: ChaosJohn/freebsd
/*
 * System call to limit rights of the given capability.
 */
int
sys_cap_rights_limit(struct thread *td, struct cap_rights_limit_args *uap)
{
	struct filedesc *fdp;
	cap_rights_t rights;
	int error, fd, version;

	cap_rights_init(&rights);

	error = copyin(uap->rightsp, &rights, sizeof(rights.cr_rights[0]));
	if (error != 0)
		return (error);
	version = CAPVER(&rights);
	if (version != CAP_RIGHTS_VERSION_00)
		return (EINVAL);

	error = copyin(uap->rightsp, &rights,
	    sizeof(rights.cr_rights[0]) * CAPARSIZE(&rights));
	if (error != 0)
		return (error);
	/* Check for race. */
	if (CAPVER(&rights) != version)
		return (EINVAL);

	if (!cap_rights_is_valid(&rights))
		return (EINVAL);

	if (version != CAP_RIGHTS_VERSION) {
		rights.cr_rights[0] &= ~(0x3ULL << 62);
		rights.cr_rights[0] |= ((uint64_t)CAP_RIGHTS_VERSION << 62);
	}
#ifdef KTRACE
	if (KTRPOINT(td, KTR_STRUCT))
		ktrcaprights(&rights);
#endif

	fd = uap->fd;

	AUDIT_ARG_FD(fd);
	AUDIT_ARG_RIGHTS(&rights);

	fdp = td->td_proc->p_fd;
	FILEDESC_XLOCK(fdp);
	if (fget_locked(fdp, fd) == NULL) {
		FILEDESC_XUNLOCK(fdp);
		return (EBADF);
	}
	error = _cap_check(cap_rights(fdp, fd), &rights, CAPFAIL_INCREASE);
	if (error == 0) {
		fdp->fd_ofiles[fd].fde_rights = rights;
		if (!cap_rights_is_set(&rights, CAP_IOCTL)) {
			free(fdp->fd_ofiles[fd].fde_ioctls, M_FILECAPS);
			fdp->fd_ofiles[fd].fde_ioctls = NULL;
			fdp->fd_ofiles[fd].fde_nioctls = 0;
		}
		if (!cap_rights_is_set(&rights, CAP_FCNTL))
			fdp->fd_ofiles[fd].fde_fcntls = 0;
	}
	FILEDESC_XUNLOCK(fdp);
	return (error);
}
Code example #13
/* Other helper routines. */
static int
ksem_create(struct thread *td, const char *name, semid_t *semidp, mode_t mode,
    unsigned int value, int flags, int compat32)
{
	struct filedesc *fdp;
	struct ksem *ks;
	struct file *fp;
	char *path;
	Fnv32_t fnv;
	int error, fd;

	if (value > SEM_VALUE_MAX)
		return (EINVAL);

	fdp = td->td_proc->p_fd;
	mode = (mode & ~fdp->fd_cmask) & ACCESSPERMS;
	error = falloc(td, &fp, &fd);
	if (error) {
		if (name == NULL)
			error = ENOSPC;
		return (error);
	}

	/*
	 * Go ahead and copyout the file descriptor now.  This is a bit
	 * premature, but it is a lot easier to handle errors as opposed
	 * to later when we've possibly created a new semaphore, etc.
	 */
	error = ksem_create_copyout_semid(td, semidp, fd, compat32);
	if (error) {
		fdclose(fdp, fp, fd, td);
		fdrop(fp, td);
		return (error);
	}

	if (name == NULL) {
		/* Create an anonymous semaphore. */
		ks = ksem_alloc(td->td_ucred, mode, value);
		if (ks == NULL)
			error = ENOSPC;
		else
			ks->ks_flags |= KS_ANONYMOUS;
	} else {
		path = malloc(MAXPATHLEN, M_KSEM, M_WAITOK);
		error = copyinstr(name, path, MAXPATHLEN, NULL);

		/* Require paths to start with a '/' character. */
		if (error == 0 && path[0] != '/')
			error = EINVAL;
		if (error) {
			fdclose(fdp, fp, fd, td);
			fdrop(fp, td);
			free(path, M_KSEM);
			return (error);
		}

		fnv = fnv_32_str(path, FNV1_32_INIT);
		sx_xlock(&ksem_dict_lock);
		ks = ksem_lookup(path, fnv);
		if (ks == NULL) {
			/* Object does not exist, create it if requested. */
			if (flags & O_CREAT) {
				ks = ksem_alloc(td->td_ucred, mode, value);
				if (ks == NULL)
					error = ENFILE;
				else {
					ksem_insert(path, fnv, ks);
					path = NULL;
				}
			} else
				error = ENOENT;
		} else {
			/*
			 * Object already exists, obtain a new
			 * reference if requested and permitted.
			 */
			if ((flags & (O_CREAT | O_EXCL)) ==
			    (O_CREAT | O_EXCL))
				error = EEXIST;
			else {
#ifdef MAC
				error = mac_posixsem_check_open(td->td_ucred,
				    ks);
				if (error == 0)
#endif
				error = ksem_access(ks, td->td_ucred);
			}
			if (error == 0)
				ksem_hold(ks);
#ifdef INVARIANTS
			else
				ks = NULL;
#endif
		}
		sx_xunlock(&ksem_dict_lock);
		if (path)
			free(path, M_KSEM);
	}

	if (error) {
		KASSERT(ks == NULL, ("ksem_create error with a ksem"));
		fdclose(fdp, fp, fd, td);
		fdrop(fp, td);
		return (error);
	}
	KASSERT(ks != NULL, ("ksem_create w/o a ksem"));

	finit(fp, FREAD | FWRITE, DTYPE_SEM, ks, &ksem_ops);

	FILEDESC_XLOCK(fdp);
	if (fdp->fd_ofiles[fd] == fp)
		fdp->fd_ofileflags[fd] |= UF_EXCLOSE;
	FILEDESC_XUNLOCK(fdp);
	fdrop(fp, td);

	return (0);
}
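Finally, a hedged userland sketch of the POSIX interface backed by ksem_create() in the ksem implementation: sem_open(3) with O_CREAT creates or opens a named semaphore; the name "/example_sem" is purely illustrative:

#include <semaphore.h>
#include <fcntl.h>
#include <err.h>

int
main(void)
{
	sem_t *sem;

	/* Create (or open) a named semaphore with an initial value of 1. */
	sem = sem_open("/example_sem", O_CREAT, 0600, 1);
	if (sem == SEM_FAILED)
		err(1, "sem_open");

	sem_wait(sem);
	/* ...critical section... */
	sem_post(sem);

	sem_close(sem);
	sem_unlink("/example_sem");
	return (0);
}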