Example #1
/*
 * Simplified back end of syscall(), used when returning from fork()
 * directly into user mode.
 *
 * This code will return back into the fork trampoline code which then
 * runs doreti.
 */
void
generic_lwp_return(struct lwp *lp, struct trapframe *frame)
{
	struct proc *p = lp->lwp_proc;

	/*
	 * Check for exit-race.  If one lwp exits the process concurrent with
	 * another lwp creating a new thread, the two operations may cross
	 * each other resulting in the newly-created lwp not receiving a
	 * KILL signal.
	 */
	if (p->p_flags & P_WEXIT) {
		lwpsignal(p, lp, SIGKILL);
	}

	/*
	 * Newly forked processes are given a kernel priority.  We have to
	 * adjust the priority to a normal user priority and fake entry
	 * into the kernel (call userenter()) to install a passive release
	 * function just in case userret() decides to stop the process.  This
	 * can occur when ^Z races a fork.  If we do not install the passive
	 * release function the current process designation will not be
	 * released when the thread goes to sleep.
	 */
	lwkt_setpri_self(TDPRI_USER_NORM);
	userenter(lp->lwp_thread, p);
	userret(lp, frame, 0);
#ifdef KTRACE
	if (KTRPOINT(lp->lwp_thread, KTR_SYSRET))
		ktrsysret(lp, SYS_fork, 0, 0);
#endif
	lp->lwp_flags |= LWP_PASSIVE_ACQ;
	userexit(lp);
	lp->lwp_flags &= ~LWP_PASSIVE_ACQ;
}
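
Note: every example on this page calls lwpsignal() with the same three arguments: the owning process, the target lwp, and a signal number. The declaration sketch below is inferred from those call sites; the return type and the header it would live in are assumptions, not taken from the examples.

/*
 * Prototype implied by the call sites above.  The void return type
 * is an assumption inferred from the callers ignoring any result.
 */
struct proc;
struct lwp;

void	lwpsignal(struct proc *p, struct lwp *lp, int sig);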
Example #2
int
smbfs_writevnode(struct vnode *vp, struct uio *uiop,
		 struct ucred *cred, int ioflag)
{
	struct thread *td;
	struct smbmount *smp = VTOSMBFS(vp);
	struct smbnode *np = VTOSMB(vp);
	struct smb_cred scred;
	int error = 0;

	if (vp->v_type != VREG) {
		SMBERROR("vn types other than VREG unsupported !\n");
		return EIO;
	}
	SMBVDEBUG("ofs=%d,resid=%d\n",(int)uiop->uio_offset, uiop->uio_resid);
	if (uiop->uio_offset < 0)
		return EINVAL;
	td = uiop->uio_td;
	if (ioflag & (IO_APPEND | IO_SYNC)) {
		if (np->n_flag & NMODIFIED) {
			smbfs_attr_cacheremove(vp);
			error = smbfs_vinvalbuf(vp, V_SAVE, 1);
			if (error)
				return error;
		}
		if (ioflag & IO_APPEND) {
#if 0 /* notyet */
			/*
			 * File size can be changed by another client
			 */
			smbfs_attr_cacheremove(vp);
			error = VOP_GETATTR(vp, &vattr);
			if (error) return (error);
#endif
			uiop->uio_offset = np->n_size;
		}
	}
	if (uiop->uio_resid == 0)
		return 0;
	if (td->td_proc &&
	    uiop->uio_offset + uiop->uio_resid >
	    td->td_proc->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
		lwpsignal(td->td_proc, td->td_lwp, SIGXFSZ);
		return EFBIG;
	}
	smb_makescred(&scred, td, cred);
	error = smb_write(smp->sm_share, np->n_fid, uiop, &scred);
	SMBVDEBUG("after: ofs=%d,resid=%d\n",(int)uiop->uio_offset, uiop->uio_resid);
	if (!error) {
		if (uiop->uio_offset > np->n_size) {
			np->n_size = uiop->uio_offset;
			vnode_pager_setsize(vp, np->n_size);
		}
	}
	return error;
}
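
Note: the RLIMIT_FSIZE check in smbfs_writevnode() (repeated in the nwfs example below) is a self-contained pattern: if the write would grow the file past the per-process soft limit, the writing lwp is sent SIGXFSZ and the write fails with EFBIG. A minimal stand-alone sketch of that check, using simplified stand-in types rather than the kernel structures:

#include <sys/types.h>
#include <signal.h>
#include <errno.h>

/*
 * Stand-alone model of the file-size-limit check above.  The request
 * struct and the deliver() callback are stand-ins; the kernel code
 * calls lwpsignal() on the writing lwp instead.
 */
struct write_req {
	off_t	offset;			/* starting offset of the write */
	off_t	resid;			/* bytes remaining to write */
};

static int
check_fsize_limit(const struct write_req *req, off_t limit,
		  void (*deliver)(int sig))
{
	if (req->offset + req->resid > limit) {
		deliver(SIGXFSZ);	/* notify the writer */
		return (EFBIG);		/* and refuse the write */
	}
	return (0);
}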
Example #3
int
nwfs_writevnode(struct vnode *vp, struct uio *uiop, struct ucred *cred,
		int ioflag)
{
	struct nwmount *nmp = VTONWFS(vp);
	struct nwnode *np = VTONW(vp);
	struct thread *td;
/*	struct vattr vattr;*/
	int error = 0;

	if (vp->v_type != VREG) {
		kprintf("%s: vn types other than VREG unsupported !\n",__func__);
		return EIO;
	}
	NCPVNDEBUG("ofs=%d,resid=%d\n",(int)uiop->uio_offset, uiop->uio_resid);
	if (uiop->uio_offset < 0)
		return EINVAL;
	td = uiop->uio_td;
	if (ioflag & (IO_APPEND | IO_SYNC)) {
		if (np->n_flag & NMODIFIED) {
			nwfs_attr_cacheremove(vp);
			error = nwfs_vinvalbuf(vp, V_SAVE, 1);
			if (error)
				return (error);
		}
		if (ioflag & IO_APPEND) {
			/*
			 * We can rely only on local information about the
			 * file size, because until the file is closed
			 * NetWare will not return the correct size.
			 */
#if 0 /* notyet */
			nwfs_attr_cacheremove(vp);
			error = VOP_GETATTR(vp, &vattr);
			if (error) return (error);
#endif
			uiop->uio_offset = np->n_size;
		}
	}
	if (uiop->uio_resid == 0)
		return 0;
	if (td->td_proc && uiop->uio_offset + uiop->uio_resid > 
	    td->td_proc->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
		lwpsignal(td->td_proc, td->td_lwp, SIGXFSZ);
		return (EFBIG);
	}
	error = ncp_write(NWFSTOCONN(nmp), &np->n_fh, uiop, cred);
	NCPVNDEBUG("after: ofs=%d,resid=%d\n",(int)uiop->uio_offset, uiop->uio_resid);
	if (!error) {
		if (uiop->uio_offset > np->n_size) {
			np->n_vattr.va_size = np->n_size = uiop->uio_offset;
			vnode_pager_setsize(vp, np->n_size);
		}
	}
	return (error);
}
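
Note: both write paths end with the same bookkeeping: after a successful write past the cached end of file, the cached size is extended and the VM pager is told about the new size. A small stand-alone model of that step; notify() is a stand-in for vnode_pager_setsize():

#include <sys/types.h>

/*
 * Model of the post-write size update shared by smbfs_writevnode()
 * and nwfs_writevnode().  cached_size models np->n_size; notify()
 * stands in for vnode_pager_setsize().
 */
static void
update_cached_size(off_t end_offset, off_t *cached_size,
		   void (*notify)(off_t newsize))
{
	if (end_offset > *cached_size) {
		*cached_size = end_offset;
		notify(*cached_size);
	}
}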
Example #4
/*
 * Kill all LWPs except the current one.  Do not try to signal
 * LWPs which have exited on their own or have already been
 * signaled.
 */
static void
killlwps(struct lwp *lp)
{
	struct proc *p = lp->lwp_proc;
	struct lwp *tlp;

	/*
	 * Kill the remaining LWPs.  We must send the signal before setting
	 * LWP_WEXIT.  The setting of WEXIT is optional but helps reduce
	 * races.  tlp must be held across the call as it might block and
	 * allow the target lwp to rip itself out from under our loop.
	 */
	FOREACH_LWP_IN_PROC(tlp, p) {
		LWPHOLD(tlp);
		if ((tlp->lwp_flag & LWP_WEXIT) == 0) {
			lwpsignal(p, tlp, SIGKILL);
			tlp->lwp_flag |= LWP_WEXIT;
		}
		LWPRELE(tlp);
	}
}
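
Note: the comment in killlwps() stresses that each target lwp must be held across lwpsignal() because the call can block and the target could otherwise be ripped out from under the loop. A simplified, self-contained model of that hold/signal/release pattern; the types and helpers are stand-ins, not the kernel's LWPHOLD()/LWPRELE():

/*
 * Stand-in model of the loop body in killlwps().  hold()/rele()
 * model LWPHOLD()/LWPRELE(); deliver() stands in for lwpsignal(),
 * which may sleep.
 */
struct fake_lwp {
	struct fake_lwp	*next;
	int		refs;		/* models the LWPHOLD/LWPRELE count */
	int		flags;
};
#define FAKE_WEXIT	0x0001		/* models LWP_WEXIT */

static void	hold(struct fake_lwp *l) { l->refs++; }
static void	rele(struct fake_lwp *l) { l->refs--; }

static void
kill_others(struct fake_lwp *head, void (*deliver)(struct fake_lwp *))
{
	struct fake_lwp *l;

	for (l = head; l != NULL; l = l->next) {
		hold(l);			/* keep l stable while we may sleep */
		if ((l->flags & FAKE_WEXIT) == 0) {
			deliver(l);		/* may block */
			l->flags |= FAKE_WEXIT;	/* narrow the signal/exit race */
		}
		rele(l);
	}
}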
Example #5
/*
 * MPSAFE
 */
int
soo_write(struct file *fp, struct uio *uio, struct ucred *cred, int fflags)
{
	struct socket *so;
	int error;
	int msgflags;

	so = (struct socket *)fp->f_data;

	if (fflags & O_FBLOCKING)
		msgflags = 0;
	else if (fflags & O_FNONBLOCKING)
		msgflags = MSG_FNONBLOCKING;
	else if (fp->f_flag & FNONBLOCK)
		msgflags = MSG_FNONBLOCKING;
	else
		msgflags = 0;

	error = so_pru_sosend(so, NULL, uio, NULL, NULL, msgflags, uio->uio_td);
	if (error == EPIPE && !(fflags & MSG_NOSIGNAL) &&
	    !(so->so_options & SO_NOSIGPIPE))
		lwpsignal(uio->uio_td->td_proc, uio->uio_td->td_lwp, SIGPIPE);
	return (error);
}
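
Note: the flag translation at the top of soo_write() gives the per-call O_FBLOCKING/O_FNONBLOCKING hints precedence over the descriptor's FNONBLOCK mode. A small sketch of that precedence written as a pure function; the flag values are illustrative stand-ins, not the kernel definitions:

/*
 * Illustrative flag values only; the real ones come from kernel
 * headers.  X_FBLOCKING/X_FNONBLOCKING model the per-call hints,
 * X_FNONBLOCK models the descriptor flag, and X_MSG_FNONBLOCKING
 * models the sosend() message flag.
 */
#define X_FBLOCKING		0x0001
#define X_FNONBLOCKING		0x0002
#define X_FNONBLOCK		0x0004
#define X_MSG_FNONBLOCKING	0x0100

static int
pick_msgflags(int call_flags, int file_flags)
{
	if (call_flags & X_FBLOCKING)
		return (0);			/* explicit blocking wins */
	if (call_flags & X_FNONBLOCKING)
		return (X_MSG_FNONBLOCKING);	/* explicit non-blocking wins */
	if (file_flags & X_FNONBLOCK)
		return (X_MSG_FNONBLOCKING);	/* descriptor set non-blocking */
	return (0);				/* default: blocking */
}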
Example #6
static int
ckpt_thaw_proc(struct lwp *lp, struct file *fp)
{
	struct proc *p = lp->lwp_proc;
	Elf_Phdr *phdr = NULL;
	Elf_Ehdr *ehdr = NULL;
	int error;
	size_t nbyte;

	TRACE_ENTER;
	
	ehdr = kmalloc(sizeof(Elf_Ehdr), M_TEMP, M_ZERO | M_WAITOK);

	if ((error = elf_gethdr(fp, ehdr)) != 0)
		goto done;
	nbyte = sizeof(Elf_Phdr) * ehdr->e_phnum; 
	phdr = kmalloc(nbyte, M_TEMP, M_WAITOK); 

	/* fetch description of program writable mappings */
	if ((error = elf_getphdrs(fp, phdr, nbyte)) != 0)
		goto done;

	/* fetch notes section containing register state */
	if ((error = elf_getnotes(lp, fp, phdr->p_filesz)) != 0)
		goto done;

	/* fetch program text vnodes */
	if ((error = elf_gettextvp(p, fp)) != 0)
		goto done;

	/* fetch signal disposition */
	if ((error = elf_getsigs(lp, fp)) != 0) {
		kprintf("failure in recovering signals\n");
		goto done;
	}

	/* fetch open files */
	if ((error = elf_getfiles(lp, fp)) != 0)
		goto done;

	/* handle mappings last in case we are reading from a socket */
	error = elf_loadphdrs(fp, phdr, ehdr->e_phnum);

	/*
	 * Set the textvp to the checkpoint file and mark the vnode so
	 * a future checkpointing of this checkpoint-restored program
	 * will copy out the contents of the mappings rather then trying
	 * to record the vnode info related to the checkpoint file, which
	 * is likely going to be destroyed when the program is re-checkpointed.
	 */
	if (error == 0 && fp->f_data && fp->f_type == DTYPE_VNODE) {
		if (p->p_textvp)
			vrele(p->p_textvp);
		p->p_textvp = (struct vnode *)fp->f_data;
		vsetflags(p->p_textvp, VCKPT);
		vref(p->p_textvp);
	}
done:
	if (ehdr)
		kfree(ehdr, M_TEMP);
	if (phdr)
		kfree(phdr, M_TEMP);
	
	lwpsignal(p, lp, 35);
	TRACE_EXIT;
	return error;
}
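
Note: ckpt_thaw_proc() follows the common single-exit idiom: allocate the headers up front, jump to a shared done: label on any error, and release everything there before signalling the lwp. A compact stand-alone sketch of the same unwind shape, using malloc/free in place of kmalloc/kfree and hypothetical step functions:

#include <stdlib.h>
#include <errno.h>

/* Hypothetical step functions; each returns 0 or an errno value. */
static int read_header(void *ehdr) { (void)ehdr; return (0); }
static int read_phdrs(void *phdr, size_t n) { (void)phdr; (void)n; return (0); }

static int
thaw_sketch(size_t ehdr_size, size_t phdr_bytes)
{
	void *ehdr = NULL;
	void *phdr = NULL;
	int error;

	ehdr = calloc(1, ehdr_size);	/* stand-in for kmalloc(..., M_ZERO | M_WAITOK) */
	if (ehdr == NULL)
		return (ENOMEM);
	if ((error = read_header(ehdr)) != 0)
		goto done;

	phdr = malloc(phdr_bytes);	/* stand-in for kmalloc(..., M_WAITOK) */
	if (phdr == NULL) {
		error = ENOMEM;
		goto done;
	}
	error = read_phdrs(phdr, phdr_bytes);
done:
	/* single exit point: free whatever was allocated */
	free(phdr);
	free(ehdr);
	return (error);
}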