Example #1
/*
 * NOTE: Must be called with tty_token held
 */
static void
ptcwakeup(struct tty *tp, int flag)
{
	ASSERT_LWKT_TOKEN_HELD(&tty_token);

	if (flag & FREAD) {
		wakeup(TSA_PTC_READ(tp));
		KNOTE(&tp->t_rkq.ki_note, 0);
	}
	if (flag & FWRITE) {
		wakeup(TSA_PTC_WRITE(tp));
		KNOTE(&tp->t_wkq.ki_note, 0);
	}
}
Example #2
/*
 * Bus independent device detachment routine.  Makes sure all
 * allocated resources are freed, callouts disabled and waiting
 * processes unblocked.
 */
int
cmx_detach(device_t dev)
{
	struct cmx_softc *sc = device_get_softc(dev);

	DEBUG_printf(dev, "called\n");

	sc->dying = 1;

	CMX_LOCK(sc);
	if (sc->polling) {
		DEBUG_printf(sc->dev, "disabling polling\n");
		callout_stop(&sc->ch);
		sc->polling = 0;
		CMX_UNLOCK(sc);
		KNOTE(&sc->kq.ki_note, 0);
	} else {
		CMX_UNLOCK(sc);
	}

	wakeup(sc);
	DEBUG_printf(dev, "releasing resources\n");
	cmx_release_resources(dev);
	dev_ops_remove_minor(&cmx_ops, device_get_unit(dev));

	return 0;
}
Example #3
/*---------------------------------------------------------------------------*
 *	i4bputqueue_hipri - put message into front of queue to userland
 *---------------------------------------------------------------------------*/
void
i4bputqueue_hipri(struct mbuf *m)
{
    if(!openflag)
    {
        i4b_Dfreembuf(m);
        return;
    }

    crit_enter();

    if(IF_QFULL(&i4b_rdqueue))
    {
        struct mbuf *m1;
        IF_DEQUEUE(&i4b_rdqueue, m1);
        i4b_Dfreembuf(m1);
        NDBGL4(L4_ERR, "ERROR, queue full, removing entry!");
    }

    IF_PREPEND(&i4b_rdqueue, m);

    crit_exit();

    if(readflag)
    {
        readflag = 0;
        wakeup((caddr_t) &i4b_rdqueue);
    }

    KNOTE(&kq_rd_info.ki_note, 0);
}
Example #4
/*
 * NOTE: Must be called with tty_token held
 */
static int
snp_detach(struct snoop *snp)
{
	struct tty *tp;

	ASSERT_LWKT_TOKEN_HELD(&tty_token);
	snp->snp_base = 0;
	snp->snp_len = 0;

	/*
	 * If line disc. changed we do not touch this pointer, SLIP/PPP will
	 * change it anyway.
	 */
	tp = snp->snp_tty;
	if (tp == NULL)
		goto detach_notty;

	if (tp && (tp->t_sc == snp) && (tp->t_state & TS_SNOOP) &&
	    tp->t_line == snooplinedisc) {
		tp->t_sc = NULL;
		tp->t_state &= ~TS_SNOOP;
		tp->t_line = snp->snp_olddisc;
	} else
		kprintf("Snoop: bad attached tty data.\n");

	snp->snp_tty = NULL;
	snp->snp_target = NULL;

detach_notty:
	KNOTE(&snp->snp_kq.ki_note, 0);
	if ((snp->snp_flags & SNOOP_OPEN) == 0) 
		kfree(snp, M_SNP);

	return (0);
}
Example #5
void
ptcwakeup(struct tty *tp, int flag)
{
	struct pt_softc *pti = pt_softc[minor(tp->t_dev)];

	if (flag & FREAD) {
		selwakeup(&pti->pt_selr);
		wakeup(&tp->t_outq.c_cf);
		KNOTE(&pti->pt_selr.si_note, 0);
	}
	if (flag & FWRITE) {
		selwakeup(&pti->pt_selw);
		wakeup(&tp->t_rawq.c_cf);
		KNOTE(&pti->pt_selw.si_note, 0);
	}
}
Example #6
static __inline
void
tmpfs_knote(struct vnode *vp, int flags)
{
	if (flags)
		KNOTE(&vp->v_pollinfo.vpi_kqinfo.ki_note, flags);
}
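This wrapper simply forwards a non-zero hint to the vnode's kqueue note list. A minimal caller sketch, assuming the standard EVFILT_VNODE hint flags from <sys/event.h> (NOTE_WRITE, NOTE_EXTEND); the helper name and its placement in a write path are hypothetical:

static void
tmpfs_write_notify(struct vnode *vp, int extended)
{
	int events = NOTE_WRITE;	/* file data changed */

	if (extended)
		events |= NOTE_EXTEND;	/* file also grew */
	/* Delivered to EVFILT_VNODE watchers via the KNOTE() above. */
	tmpfs_knote(vp, events);
}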
Example #7
/*
 * Close the character device.
 */
static int
cmx_close(struct dev_close_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct cmx_softc *sc;

	sc = devclass_get_softc(cmx_devclass, minor(dev));
	if (sc == NULL || sc->dying)
		return ENXIO;

	CMX_LOCK(sc);
	if (!sc->open) {
		CMX_UNLOCK(sc);
		return EINVAL;
	}
	if (sc->polling) {
		DEBUG_printf(sc->dev, "disabling polling\n");
		callout_stop(&sc->ch);
		sc->polling = 0;
		CMX_UNLOCK(sc);
		KNOTE(&sc->kq.ki_note, 0);
		CMX_LOCK(sc);
	}
	sc->open = 0;
	CMX_UNLOCK(sc);

	DEBUG_printf(sc->dev, "close (flags=%b thread=%p)\n",
			ap->a_fflag, MODEBITS, curthread);
	return 0;
}
Example #8
/*
 * If there are processes sleeping on this descriptor, wake them up.
 */
static __inline void
bpf_wakeup(struct bpf_d *d)
{
	wakeup((caddr_t)d);
	if (d->bd_async && d->bd_sig)
		csignal(d->bd_pgid, d->bd_sig,
		    d->bd_siguid, d->bd_sigeuid);

	selwakeup(&d->bd_sel);
	KNOTE(&d->bd_sel.si_note, 0);
}
Example #9
void
pipeselwakeup(struct pipe *cpipe)
{
	if (cpipe->pipe_state & PIPE_SEL) {
		cpipe->pipe_state &= ~PIPE_SEL;
		selwakeup(&cpipe->pipe_sel);
	} else
		KNOTE(&cpipe->pipe_sel.si_note, 0);
	if ((cpipe->pipe_state & PIPE_ASYNC) && cpipe->pipe_pgid != NO_PID)
		gsignal(cpipe->pipe_pgid, SIGIO);
}
Example #10
void
drm_event_wakeup(struct drm_pending_event *e)
{
	struct drm_file *file_priv;
	struct drm_device *dev;

	file_priv = e->file_priv;
	dev = file_priv->dev;
	KKASSERT(lockstatus(&dev->event_lock, curthread) != 0);

	wakeup(&file_priv->event_space);
	KNOTE(&file_priv->dkq.ki_note, 0);
}
Example #11
int
apm_record_event(struct apm_softc *sc, u_int type)
{
	if (!apm_error && (sc->sc_flags & SCFLAG_OPEN) == 0) {
		DPRINTF(("apm_record_event: no user waiting\n"));
		apm_error++;
		return 1;
	}

	apm_evindex++;
	KNOTE(&sc->sc_note, APM_EVENT_COMPOSE(type, apm_evindex));
	return (0);
}
Example #12
int
apm_record_event(struct pxa2x0_apm_softc *sc, u_int type)
{
	static int apm_evindex;

	/* skip if no user waiting */
	if ((sc->sc_flags & SCFLAG_OPEN) == 0)
		return (1);

	apm_evindex++;
	KNOTE(&sc->sc_note, APM_EVENT_COMPOSE(type, apm_evindex));

	return (0);
}
Example #13
/*
 * Periodical callout routine, polling the reader for data
 * availability.  If the reader signals data ready for reading,
 * wakes up the processes which are waiting in select()/poll()/kevent().
 * Otherwise, reschedules itself with a delay of POLL_TICKS.
 */
static void
cmx_tick(void *xsc)
{
	struct cmx_softc *sc = xsc;
	uint8_t bsr;

	CMX_LOCK(sc);
	if (sc->polling && !sc->dying) {
		bsr = cmx_read_BSR(sc);
		DEBUG_printf(sc->dev, "BSR=%b\n", bsr, BSRBITS);
		if (cmx_test(bsr, BSR_BULK_IN_FULL, 1)) {
			sc->polling = 0;
			KNOTE(&sc->kq.ki_note, 0);
		} else {
			callout_reset(&sc->ch, POLL_TICKS, cmx_tick, sc);
		}
	}
	CMX_UNLOCK(sc);
}
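The KNOTE() here is what ultimately satisfies a userland kevent() wait on the device. A minimal sketch of that consumer side, assuming the driver exposes its read filter as EVFILT_READ on the character device; the device path is illustrative:

#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <fcntl.h>
#include <unistd.h>
#include <err.h>

int
main(void)
{
	struct kevent ev;
	int fd, kq;

	if ((fd = open("/dev/cmx0", O_RDWR)) < 0)	/* illustrative node */
		err(1, "open");
	if ((kq = kqueue()) < 0)
		err(1, "kqueue");

	/* Register interest in readability; the driver's KNOTE() call
	 * is what eventually marks this knote active. */
	EV_SET(&ev, fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
	if (kevent(kq, &ev, 1, NULL, 0, NULL) < 0)
		err(1, "kevent register");

	/* Block until the reader reports data (cmx_tick -> KNOTE). */
	if (kevent(kq, NULL, 0, &ev, 1, NULL) < 0)
		err(1, "kevent wait");

	close(kq);
	close(fd);
	return 0;
}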
Example #14
/*
 * Wakeup processes waiting on a socket buffer.
 * Do asynchronous notification via SIGIO
 * if the socket has the SS_ASYNC flag set.
 */
void
sowakeup(struct socket *so, struct sockbuf *sb)
{
        sb->sb_flags &= ~SB_SEL;
        selwakeup(&sb->sb_sel);
        if (sb->sb_flags & SB_WAIT) {
                sb->sb_flags &= ~SB_WAIT;
                wakeup((caddr_t)&sb->sb_cc);
        }
#if 0 // Intentionally commented out
        if (so->so_state & SS_ASYNC) {
                if (so->so_pgid < 0)
                        gsignal(-so->so_pgid, SIGIO);
                else if (so->so_pgid > 0)
                        proc_signal(so->so_pgid, SIGIO);
        }
        if (sb->sb_flags & SB_KNOTE) {
                KNOTE(&sb->sb_sel.si_note, SO_FILT_HINT_LOCKED);
        }
        if (sb->sb_flags & SB_UPCALL) {
                void (*so_upcall)(struct socket *, caddr_t, int);
                caddr_t so_upcallarg;

                so_upcall = so->so_upcall;
                so_upcallarg = so->so_upcallarg;
                /* Let close know that we're about to do an upcall */
                so->so_flags |= SOF_UPCALLINUSE;

                socket_unlock(so, 0);
                (*so_upcall)(so, so_upcallarg, M_DONTWAIT);
                socket_lock(so, 0);

                so->so_flags &= ~SOF_UPCALLINUSE;
                /* Tell close that it's safe to proceed */
                if (so->so_flags & SOF_CLOSEWAIT)
                        wakeup((caddr_t)&so->so_upcall);
        }
#endif
}
Example #15
int
apm_record_event(u_int event, const char *src, const char *msg)
{
	static int apm_evindex;
	struct apm_softc *sc;

	/* apm0 only */
	if (apm_cd.cd_ndevs == 0 || (sc = apm_cd.cd_devs[0]) == NULL)
		return ENXIO;

	if ((sc->sc_flags & SCFLAG_NOPRINT) == 0)
		printf("%s: %s %s\n", sc->sc_dev.dv_xname, src, msg);

	/* skip if no user waiting */
	if ((sc->sc_flags & SCFLAG_OPEN) == 0)
		return (1);

	apm_evindex++;
	KNOTE(&sc->sc_note, APM_EVENT_COMPOSE(event, apm_evindex));

	return (0);
}
Example #16
static void
logtimeout(void *arg)
{

	if (!log_open)
		return;
	if (msgbuftrigger == 0) {
		callout_reset(&logsoftc.sc_callout,
		    hz / log_wakeups_per_second, logtimeout, NULL);
		return;
	}
	msgbuftrigger = 0;
	KNOTE(&logsoftc.sc_kqp.ki_note, 0);
	if ((logsoftc.sc_state & LOG_ASYNC) && logsoftc.sc_sigio != NULL)
		pgsigio(logsoftc.sc_sigio, SIGIO, 0);
	if (logsoftc.sc_state & LOG_RDWAIT) {
		wakeup((caddr_t)msgbufp);
		logsoftc.sc_state &= ~LOG_RDWAIT;
	}
	callout_reset(&logsoftc.sc_callout, hz / log_wakeups_per_second,
	    logtimeout, NULL);
}
Example #17
File: bpf.c Project: SbIm/xnu-env
/*
 * If there are processes sleeping on this descriptor, wake them up.
 */
static void
bpf_wakeup(struct bpf_d *d)
{
	wakeup((caddr_t)d);
	if (d->bd_async && d->bd_sig && d->bd_sigio)
		pgsigio(d->bd_sigio, d->bd_sig);

#if BSD >= 199103
	selwakeup(&d->bd_sel);
	KNOTE(&d->bd_sel.si_note, 1);
#ifndef __APPLE__
	/* XXX */
	d->bd_sel.si_pid = 0;
#endif
#else
	if (d->bd_selproc) {
		selwakeup(d->bd_selproc, (int)d->bd_selcoll);
		d->bd_selcoll = 0;
		d->bd_selproc = 0;
	}
#endif
}
Example #18
/*
 * shutdown the pipe
 */
void
pipeclose(struct pipe *cpipe)
{
	struct pipe *ppipe;
	if (cpipe) {
		
		pipeselwakeup(cpipe);

		/*
		 * If the other side is blocked, wake it up saying that
		 * we want to close it down.
		 */
		while (cpipe->pipe_busy) {
			wakeup(cpipe);
			cpipe->pipe_state |= PIPE_WANT | PIPE_EOF;
			tsleep(cpipe, PRIBIO, "pipecl", 0);
		}

		/*
		 * Disconnect from peer
		 */
		if ((ppipe = cpipe->pipe_peer) != NULL) {
			pipeselwakeup(ppipe);

			ppipe->pipe_state |= PIPE_EOF;
			wakeup(ppipe);
			KNOTE(&ppipe->pipe_sel.si_note, 0);
			ppipe->pipe_peer = NULL;
		}

		/*
		 * free resources
		 */
		pipe_free_kmem(cpipe);
		pool_put(&pipe_pool, cpipe);
	}
}
Example #19
static int
sysvipc_dev_ioctl(struct dev_ioctl_args *ap)
{
	int error;
	struct client *cl;
	struct proc *proc_to, *proc_from;
	struct sysvipc_req *req;
	cdev_t dev = ap->a_head.a_dev;
	struct sysvipc_softc *sysv = dev->si_drv1;
	struct client_shm_data *client_data;

	error = 0;

	switch(ap->a_cmd) {
	case REGISTER_DAEMON:
		kprintf("[driver] register daemon\n");
		sysvipc_register(dev);
		break;
	case UNREGISTER_DAEMON:
		sysvipc_unregister(dev);
		break;
	case INSTALL_PIPE:
		kprintf("[driver] install pipe\n");

		if(curthread != sysv->sysvipc_daemon_thread)
			return (EPERM);

		cl = (struct client *)ap->a_data;

		//kprintf("sysv ipc pid = %d fd 0 = %d fd 1 = %d!\n",
		//cl->pid, cl->fd[0], cl->fd[1]);

		if(cl->pid <= 0)
			return (EINVAL);

		proc_from = curthread->td_proc;
		proc_to = pfind(cl->pid);

		/* Find the request associated with the client. */
		req = sysvipc_find_req(&sysv->consumed_list,
				&sysv->consumed_mtx, cl->pid);
		if(!req)
			return (EINVAL);

		/* Install two file descriptors for the client.
		 * The first is used for reading and the second for
		 * writing.
		 */
		req->fd[0] = sysvipc_install_fd(proc_from,
				proc_to, cl->fd[0]);
		req->fd[1] = sysvipc_install_fd(proc_from,
				proc_to, cl->fd[1]);
		PRELE(proc_to);

		/* Wakeup the client. */
		wakeup(req);

		break;
	case REGISTER_TO_DAEMON:

		if(sysv == NULL)
			return (EEXIST);

		if(curthread == sysv->sysvipc_daemon_thread)
			return (EPERM);

		kprintf("[driver] register to daemon\n");
		cl = (struct client *)ap->a_data;

		/* Allocate a structure for the request. */
		req = (struct sysvipc_req*)kmalloc( sizeof(*req),
				M_TEMP, M_ZERO | M_WAITOK);

		req->proc = curthread->td_proc;

		/* Add the request to the list read by the daemon. */
		lockmgr(&sysv->req_mtx, LK_EXCLUSIVE);
		TAILQ_INSERT_HEAD(&sysv->req_list, req, req_link);
		lockmgr(&sysv->req_mtx, LK_RELEASE);

		/* Wake up daemon to process the request. */
		wakeup(sysv);
		KNOTE(&sysv->sysvipc_rkq.ki_note, 0);

		/* Wait so that the daemon processes the request and
		 * installs the two new file descriptors.
		 */
		tsleep(req, 0, "regist", 0);

		//kprintf("client wake up %d %d\n", req->fd[0], req->fd[1]);
		cl->fd[0] = req->fd[0];
		cl->fd[1] = req->fd[1];

		/* Remove the request from the list of consumed requests. */
		lockmgr(&sysv->consumed_mtx, LK_EXCLUSIVE);
		TAILQ_REMOVE(&sysv->consumed_list, req, req_link);
		lockmgr(&sysv->consumed_mtx, LK_RELEASE);

		kfree(req, M_TEMP);
		break;
	case CONSUME_REQUEST:
		if(curthread != sysv->sysvipc_daemon_thread)
			return (EPERM);

		/* Wait for new requests. */
		lockmgr(&sysv->req_mtx, LK_EXCLUSIVE);
		while(TAILQ_EMPTY(&sysv->req_list))
			lksleep(sysv, &sysv->req_mtx, 0, "sysv", 0);
		kprintf("wake up to consume request\n");

		/* Remove the request from the req_list and add it to the
		 * list of consumed requests.
		 */
		req = TAILQ_LAST( &sysv->req_list, _req_list);
		if(!req) {
			lockmgr(&sysv->req_mtx, LK_RELEASE);
			return (EINVAL);
		}
		TAILQ_REMOVE(&sysv->req_list, req, req_link);
		lockmgr(&sysv->req_mtx, LK_RELEASE);

		lockmgr(&sysv->consumed_mtx, LK_EXCLUSIVE);
		TAILQ_INSERT_HEAD(&sysv->consumed_list, req, req_link);
		kprintf("pid received = %d\n", req->proc->p_pid);
		*(pid_t *)ap->a_data = req->proc->p_pid;
		lockmgr(&sysv->consumed_mtx, LK_RELEASE);

		break;
	case INSTALL_FD:
		if(curthread != sysv->sysvipc_daemon_thread)
			return (EPERM);

		kprintf("[driver] install fd\n");
		client_data = (struct client_shm_data *)ap->a_data;

		/* Get process structure. */
		proc_from = curthread->td_proc;
		proc_to = pfind(client_data->pid);

		/* Get client's credentials. */
		//get_proc_shm_cred(proc_to, &client_data->shm_perm);

		/* Install for the client the file descriptor. */
		client_data->fd = sysvipc_install_fd(proc_from,
				proc_to, client_data->fd);

		PRELE(proc_to);

		break;
	default:
		break;
	}

	return(error);
}
Example #20
int
sys_accept(struct proc *p, void *v, register_t *retval)
{
	struct sys_accept_args /* {
		syscallarg(int) s;
		syscallarg(struct sockaddr *) name;
		syscallarg(socklen_t *) anamelen;
	} */ *uap = v;
	struct file *fp, *headfp;
	struct mbuf *nam;
	socklen_t namelen;
	int error, s, tmpfd;
	struct socket *head, *so;
	int nflag;

	if (SCARG(uap, name) && (error = copyin(SCARG(uap, anamelen),
	    &namelen, sizeof (namelen))))
		return (error);
	if ((error = getsock(p->p_fd, SCARG(uap, s), &fp)) != 0)
		return (error);
	headfp = fp;
	s = splsoftnet();
	head = fp->f_data;
redo:
	if ((head->so_options & SO_ACCEPTCONN) == 0) {
		error = EINVAL;
		goto bad;
	}
	if ((head->so_state & SS_NBIO) && head->so_qlen == 0) {
		if (head->so_state & SS_CANTRCVMORE)
			error = ECONNABORTED;
		else
			error = EWOULDBLOCK;
		goto bad;
	}
	while (head->so_qlen == 0 && head->so_error == 0) {
		if (head->so_state & SS_CANTRCVMORE) {
			head->so_error = ECONNABORTED;
			break;
		}
		error = tsleep(&head->so_timeo, PSOCK | PCATCH, "netcon", 0);
		if (error) {
			goto bad;
		}
	}
	if (head->so_error) {
		error = head->so_error;
		head->so_error = 0;
		goto bad;
	}
	
	/* Take note if socket was non-blocking. */
	nflag = (headfp->f_flag & FNONBLOCK);

	fdplock(p->p_fd);
	error = falloc(p, &fp, &tmpfd);
	fdpunlock(p->p_fd);
	if (error != 0) {
		/*
		 * Probably ran out of file descriptors.  Wakeup
		 * so some other process might have a chance at it.
		 */
		wakeup_one(&head->so_timeo);
		goto bad;
	}

	nam = m_get(M_WAIT, MT_SONAME);

	/*
	 * Check whether the queue emptied while we slept: falloc() or
	 * m_get() may have blocked, allowing the connection to be reset
	 * or another thread or process to accept it.  If so, start over.
	 */
	if (head->so_qlen == 0) {
		m_freem(nam);
		fdplock(p->p_fd);
		fdremove(p->p_fd, tmpfd);
		closef(fp, p);
		fdpunlock(p->p_fd);
		goto redo;
	}

	/*
	 * Do not sleep after we have taken the socket out of the queue.
	 */
	so = TAILQ_FIRST(&head->so_q);
	if (soqremque(so, 1) == 0)
		panic("accept");

	/* connection has been removed from the listen queue */
	KNOTE(&head->so_rcv.sb_sel.si_note, 0);

	fp->f_type = DTYPE_SOCKET;
	fp->f_flag = FREAD | FWRITE | nflag;
	fp->f_ops = &socketops;
	fp->f_data = so;
	error = soaccept(so, nam);
	if (!error && SCARG(uap, name)) {
		error = copyaddrout(p, nam, SCARG(uap, name), namelen,
		    SCARG(uap, anamelen));
	}

	if (error) {
		/* if an error occurred, free the file descriptor */
		fdplock(p->p_fd);
		fdremove(p->p_fd, tmpfd);
		closef(fp, p);
		fdpunlock(p->p_fd);
	} else {
		FILE_SET_MATURE(fp, p);
		*retval = tmpfd;
	}
	m_freem(nam);
bad:
	splx(s);
	FRELE(headfp, p);
	return (error);
}
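On the userland side, the listen queue drained here is typically watched with EVFILT_READ on the listening socket; for a listening socket the filter's data field reports how many connections are pending. A minimal sketch with error handling omitted; the port number is illustrative:

#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	struct sockaddr_in sin;
	struct kevent ev;
	int s, kq, conn;

	s = socket(AF_INET, SOCK_STREAM, 0);
	memset(&sin, 0, sizeof(sin));
	sin.sin_len = sizeof(sin);
	sin.sin_family = AF_INET;
	sin.sin_port = htons(12345);		/* illustrative port */
	sin.sin_addr.s_addr = htonl(INADDR_ANY);
	bind(s, (struct sockaddr *)&sin, sizeof(sin));
	listen(s, 8);

	kq = kqueue();
	EV_SET(&ev, s, EVFILT_READ, EV_ADD, 0, 0, NULL);
	kevent(kq, &ev, 1, NULL, 0, NULL);

	/* Wait until at least one connection is pending, then accept it;
	 * ev.data holds the current length of the listen queue. */
	kevent(kq, NULL, 0, &ev, 1, NULL);
	conn = accept(s, NULL, NULL);

	close(conn);
	close(kq);
	close(s);
	return 0;
}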
Example #21
/*
 * Read from the character device.
 * Returns zero if successful, ENXIO if dying, EINVAL if an attempt
 * was made to read less than CMX_MIN_RDLEN bytes or less than the
 * device has available, or any of the errors that cmx_sync_write_SCR
 * can return.  Partial reads are not supported.
 */
static int
cmx_read(struct dev_read_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct cmx_softc *sc;
	struct uio *uio = ap->a_uio;
	unsigned long bytes_left;
	uint8_t uc;
	int rv, amnt, offset;

	sc = devclass_get_softc(cmx_devclass, minor(dev));
	if (sc == NULL || sc->dying)
		return ENXIO;

	DEBUG_printf(sc->dev, "called (len=%d flag=%b)\n",
		uio->uio_resid, ap->a_ioflag, MODEBITS);

	CMX_LOCK(sc);
	if (sc->polling) {
		DEBUG_printf(sc->dev, "disabling polling\n");
		callout_stop(&sc->ch);
		sc->polling = 0;
		CMX_UNLOCK(sc);
		KNOTE(&sc->kq.ki_note, 0);
	} else {
		CMX_UNLOCK(sc);
	}

	if (uio->uio_resid == 0) {
		return 0;
	}

	if (uio->uio_resid < CMX_MIN_RDLEN) {
		return EINVAL;
	}

	if (ap->a_ioflag & O_NONBLOCK) {
		if (cmx_test_BSR(sc, BSR_BULK_IN_FULL, 0)) {
			return EAGAIN;
		}
	}

	for (int i = 0; i < 5; i++) {
		if ((rv = cmx_wait_BSR(sc, BSR_BULK_IN_FULL, 1)) != 0) {
			return rv;
		}
		sc->buf[i] = cmx_read_DTR(sc);
		DEBUG_printf(sc->dev, "buf[%02x]=%02x\n", i, sc->buf[i]);
	}

	bytes_left = CMX_MIN_RDLEN +
	                (0x000000FF&((char)sc->buf[1])) +
	                (0x0000FF00&((char)sc->buf[2] << 8)) +
	                (0x00FF0000&((char)sc->buf[3] << 16)) +
	                (0xFF000000&((char)sc->buf[4] << 24));
	DEBUG_printf(sc->dev, "msgsz=%lu\n", bytes_left);

	if (uio->uio_resid < bytes_left) {
		return EINVAL;
	}

	offset = 5; /* prefetched header */
	while (bytes_left > 0) {
		amnt = MIN(bytes_left, sizeof(sc->buf));

		for (int i = offset; i < amnt; i++) {
			if ((rv = cmx_wait_BSR(sc, BSR_BULK_IN_FULL, 1))!=0) {
				return rv;
			}
			sc->buf[i] = cmx_read_DTR(sc);
			DEBUG_printf(sc->dev, "buf[%02x]=%02x\n",
					i, sc->buf[i]);
		}

		if ((rv = uiomove(sc->buf, amnt, uio)) != 0) {
			DEBUG_printf(sc->dev, "uiomove failed (%d)\n", rv);
			return rv;
		}

		if (offset)
			offset = 0;
		bytes_left -= amnt;
	}

	if ((rv = cmx_wait_BSR(sc, BSR_BULK_IN_FULL, 1)) != 0) {
		return rv;
	}

	if ((rv = cmx_sync_write_SCR(sc, SCR_READER_TO_HOST_DONE)) != 0) {
		return rv;
	}

	uc = cmx_read_DTR(sc);
	DEBUG_printf(sc->dev, "success (DTR=%02x)\n", uc);
	return 0;
}
Example #22
/*
 * General fork call.  Note that another LWP in the process may call exec()
 * or exit() while we are forking.  It's safe to continue here, because
 * neither operation will complete until all LWPs have exited the process.
 */
int
fork1(struct lwp *l1, int flags, int exitsig, void *stack, size_t stacksize,
    void (*func)(void *), void *arg, register_t *retval,
    struct proc **rnewprocp)
{
	struct proc	*p1, *p2, *parent;
	struct plimit   *p1_lim;
	uid_t		uid;
	struct lwp	*l2;
	int		count;
	vaddr_t		uaddr;
	int		tnprocs;
	int		tracefork;
	int		error = 0;

	p1 = l1->l_proc;
	uid = kauth_cred_getuid(l1->l_cred);
	tnprocs = atomic_inc_uint_nv(&nprocs);

	/*
	 * Although process entries are dynamically created, we still keep
	 * a global limit on the maximum number we will create.
	 */
	if (__predict_false(tnprocs >= maxproc))
		error = -1;
	else
		error = kauth_authorize_process(l1->l_cred,
		    KAUTH_PROCESS_FORK, p1, KAUTH_ARG(tnprocs), NULL, NULL);

	if (error) {
		static struct timeval lasttfm;
		atomic_dec_uint(&nprocs);
		if (ratecheck(&lasttfm, &fork_tfmrate))
			tablefull("proc", "increase kern.maxproc or NPROC");
		if (forkfsleep)
			kpause("forkmx", false, forkfsleep, NULL);
		return EAGAIN;
	}

	/*
	 * Enforce limits.
	 */
	count = chgproccnt(uid, 1);
	if (__predict_false(count > p1->p_rlimit[RLIMIT_NPROC].rlim_cur)) {
		if (kauth_authorize_process(l1->l_cred, KAUTH_PROCESS_RLIMIT,
		    p1, KAUTH_ARG(KAUTH_REQ_PROCESS_RLIMIT_BYPASS),
		    &p1->p_rlimit[RLIMIT_NPROC], KAUTH_ARG(RLIMIT_NPROC)) != 0) {
			(void)chgproccnt(uid, -1);
			atomic_dec_uint(&nprocs);
			if (forkfsleep)
				kpause("forkulim", false, forkfsleep, NULL);
			return EAGAIN;
		}
	}

	/*
	 * Allocate virtual address space for the U-area now, while it
	 * is still easy to abort the fork operation if we're out of
	 * kernel virtual address space.
	 */
	uaddr = uvm_uarea_alloc();
	if (__predict_false(uaddr == 0)) {
		(void)chgproccnt(uid, -1);
		atomic_dec_uint(&nprocs);
		return ENOMEM;
	}

	/*
	 * We are now committed to the fork.  From here on, we may
	 * block on resources, but resource allocation may NOT fail.
	 */

	/* Allocate new proc. */
	p2 = proc_alloc();

	/*
	 * Make a proc table entry for the new process.
	 * Start by zeroing the section of proc that is zero-initialized,
	 * then copy the section that is copied directly from the parent.
	 */
	memset(&p2->p_startzero, 0,
	    (unsigned) ((char *)&p2->p_endzero - (char *)&p2->p_startzero));
	memcpy(&p2->p_startcopy, &p1->p_startcopy,
	    (unsigned) ((char *)&p2->p_endcopy - (char *)&p2->p_startcopy));

	TAILQ_INIT(&p2->p_sigpend.sp_info);

	LIST_INIT(&p2->p_lwps);
	LIST_INIT(&p2->p_sigwaiters);

	/*
	 * Duplicate sub-structures as needed.
	 * Increase reference counts on shared objects.
	 * Inherit flags we want to keep.  The flags related to SIGCHLD
	 * handling are important in order to keep a consistent behaviour
	 * for the child after the fork.  If we are a 32-bit process, the
	 * child will be too.
	 */
	p2->p_flag =
	    p1->p_flag & (PK_SUGID | PK_NOCLDWAIT | PK_CLDSIGIGN | PK_32);
	p2->p_emul = p1->p_emul;
	p2->p_execsw = p1->p_execsw;

	if (flags & FORK_SYSTEM) {
		/*
		 * Mark it as a system process.  Set P_NOCLDWAIT so that
		 * children are reparented to init(8) when they exit.
		 * init(8) can easily wait them out for us.
		 */
		p2->p_flag |= (PK_SYSTEM | PK_NOCLDWAIT);
	}

	mutex_init(&p2->p_stmutex, MUTEX_DEFAULT, IPL_HIGH);
	mutex_init(&p2->p_auxlock, MUTEX_DEFAULT, IPL_NONE);
	rw_init(&p2->p_reflock);
	cv_init(&p2->p_waitcv, "wait");
	cv_init(&p2->p_lwpcv, "lwpwait");

	/*
	 * Share a lock between the processes if they are to share signal
	 * state: we must synchronize access to it.
	 */
	if (flags & FORK_SHARESIGS) {
		p2->p_lock = p1->p_lock;
		mutex_obj_hold(p1->p_lock);
	} else
		p2->p_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);

	kauth_proc_fork(p1, p2);

	p2->p_raslist = NULL;
#if defined(__HAVE_RAS)
	ras_fork(p1, p2);
#endif

	/* bump references to the text vnode (for procfs) */
	p2->p_textvp = p1->p_textvp;
	if (p2->p_textvp)
		vref(p2->p_textvp);

	if (flags & FORK_SHAREFILES)
		fd_share(p2);
	else if (flags & FORK_CLEANFILES)
		p2->p_fd = fd_init(NULL);
	else
		p2->p_fd = fd_copy();

	/* XXX racy */
	p2->p_mqueue_cnt = p1->p_mqueue_cnt;

	if (flags & FORK_SHARECWD)
		cwdshare(p2);
	else
		p2->p_cwdi = cwdinit();

	/*
	 * Note: p_limit (rlimit stuff) is copy-on-write, so normally
	 * we just need increase pl_refcnt.
	 */
	p1_lim = p1->p_limit;
	if (!p1_lim->pl_writeable) {
		lim_addref(p1_lim);
		p2->p_limit = p1_lim;
	} else {
		p2->p_limit = lim_copy(p1_lim);
	}

	if (flags & FORK_PPWAIT) {
		/* Mark ourselves as waiting for a child. */
		l1->l_pflag |= LP_VFORKWAIT;
		p2->p_lflag = PL_PPWAIT;
		p2->p_vforklwp = l1;
	} else {
		p2->p_lflag = 0;
	}
	p2->p_sflag = 0;
	p2->p_slflag = 0;
	parent = (flags & FORK_NOWAIT) ? initproc : p1;
	p2->p_pptr = parent;
	p2->p_ppid = parent->p_pid;
	LIST_INIT(&p2->p_children);

	p2->p_aio = NULL;

#ifdef KTRACE
	/*
	 * Copy traceflag and tracefile if enabled.
	 * If not inherited, these were zeroed above.
	 */
	if (p1->p_traceflag & KTRFAC_INHERIT) {
		mutex_enter(&ktrace_lock);
		p2->p_traceflag = p1->p_traceflag;
		if ((p2->p_tracep = p1->p_tracep) != NULL)
			ktradref(p2);
		mutex_exit(&ktrace_lock);
	}
#endif

	/*
	 * Create signal actions for the child process.
	 */
	p2->p_sigacts = sigactsinit(p1, flags & FORK_SHARESIGS);
	mutex_enter(p1->p_lock);
	p2->p_sflag |=
	    (p1->p_sflag & (PS_STOPFORK | PS_STOPEXEC | PS_NOCLDSTOP));
	sched_proc_fork(p1, p2);
	mutex_exit(p1->p_lock);

	p2->p_stflag = p1->p_stflag;

	/*
	 * p_stats.
	 * Copy parts of p_stats, and zero out the rest.
	 */
	p2->p_stats = pstatscopy(p1->p_stats);

	/*
	 * Set up the new process address space.
	 */
	uvm_proc_fork(p1, p2, (flags & FORK_SHAREVM) ? true : false);

	/*
	 * Finish creating the child process.
	 * It will return through a different path later.
	 */
	lwp_create(l1, p2, uaddr, (flags & FORK_PPWAIT) ? LWP_VFORK : 0,
	    stack, stacksize, (func != NULL) ? func : child_return, arg, &l2,
	    l1->l_class);

	/*
	 * Inherit l_private from the parent.
	 * Note that we cannot use lwp_setprivate() here since that
	 * also sets the CPU TLS register, which is incorrect if the
	 * process has changed that without letting the kernel know.
	 */
	l2->l_private = l1->l_private;

	/*
	 * If emulation has a process fork hook, call it now.
	 */
	if (p2->p_emul->e_proc_fork)
		(*p2->p_emul->e_proc_fork)(p2, l1, flags);

	/*
	 * ...and finally, any other random fork hooks that subsystems
	 * might have registered.
	 */
	doforkhooks(p2, p1);

	SDT_PROBE(proc,,,create, p2, p1, flags, 0, 0);

	/*
	 * It's now safe for the scheduler and other processes to see the
	 * child process.
	 */
	mutex_enter(proc_lock);

	if (p1->p_session->s_ttyvp != NULL && p1->p_lflag & PL_CONTROLT)
		p2->p_lflag |= PL_CONTROLT;

	LIST_INSERT_HEAD(&parent->p_children, p2, p_sibling);
	p2->p_exitsig = exitsig;		/* signal for parent on exit */

	/*
	 * We don't want to tracefork vfork()ed processes because they
	 * will not receive the SIGTRAP until it is too late.
	 */
	tracefork = (p1->p_slflag & (PSL_TRACEFORK|PSL_TRACED)) ==
	    (PSL_TRACEFORK|PSL_TRACED) && (flags & FORK_PPWAIT) == 0;
	if (tracefork) {
		p2->p_slflag |= PSL_TRACED;
		p2->p_opptr = p2->p_pptr;
		if (p2->p_pptr != p1->p_pptr) {
			struct proc *parent1 = p2->p_pptr;

			if (parent1->p_lock < p2->p_lock) {
				if (!mutex_tryenter(parent1->p_lock)) {
					mutex_exit(p2->p_lock);
					mutex_enter(parent1->p_lock);
				}
			} else if (parent1->p_lock > p2->p_lock) {
				mutex_enter(parent1->p_lock);
			}
			parent1->p_slflag |= PSL_CHTRACED;
			proc_reparent(p2, p1->p_pptr);
			if (parent1->p_lock != p2->p_lock)
				mutex_exit(parent1->p_lock);
		}

		/*
		 * Set ptrace status.
		 */
		p1->p_fpid = p2->p_pid;
		p2->p_fpid = p1->p_pid;
	}

	LIST_INSERT_AFTER(p1, p2, p_pglist);
	LIST_INSERT_HEAD(&allproc, p2, p_list);

	p2->p_trace_enabled = trace_is_enabled(p2);
#ifdef __HAVE_SYSCALL_INTERN
	(*p2->p_emul->e_syscall_intern)(p2);
#endif

	/*
	 * Update stats now that we know the fork was successful.
	 */
	uvmexp.forks++;
	if (flags & FORK_PPWAIT)
		uvmexp.forks_ppwait++;
	if (flags & FORK_SHAREVM)
		uvmexp.forks_sharevm++;

	/*
	 * Pass a pointer to the new process to the caller.
	 */
	if (rnewprocp != NULL)
		*rnewprocp = p2;

	if (ktrpoint(KTR_EMUL))
		p2->p_traceflag |= KTRFAC_TRC_EMUL;

	/*
	 * Notify any interested parties about the new process.
	 */
	if (!SLIST_EMPTY(&p1->p_klist)) {
		mutex_exit(proc_lock);
		KNOTE(&p1->p_klist, NOTE_FORK | p2->p_pid);
		mutex_enter(proc_lock);
	}

	/*
	 * Make child runnable, set start time, and add to run queue except
	 * if the parent requested the child to start in SSTOP state.
	 */
	mutex_enter(p2->p_lock);

	/*
	 * Start profiling.
	 */
	if ((p2->p_stflag & PST_PROFIL) != 0) {
		mutex_spin_enter(&p2->p_stmutex);
		startprofclock(p2);
		mutex_spin_exit(&p2->p_stmutex);
	}

	getmicrotime(&p2->p_stats->p_start);
	p2->p_acflag = AFORK;
	lwp_lock(l2);
	KASSERT(p2->p_nrlwps == 1);
	if (p2->p_sflag & PS_STOPFORK) {
		struct schedstate_percpu *spc = &l2->l_cpu->ci_schedstate;
		p2->p_nrlwps = 0;
		p2->p_stat = SSTOP;
		p2->p_waited = 0;
		p1->p_nstopchild++;
		l2->l_stat = LSSTOP;
		KASSERT(l2->l_wchan == NULL);
		lwp_unlock_to(l2, spc->spc_lwplock);
	} else {
		p2->p_nrlwps = 1;
		p2->p_stat = SACTIVE;
		l2->l_stat = LSRUN;
		sched_enqueue(l2, false);
		lwp_unlock(l2);
	}

	/*
	 * Return child pid to parent process,
	 * marking us as parent via retval[1].
	 */
	if (retval != NULL) {
		retval[0] = p2->p_pid;
		retval[1] = 0;
	}
	mutex_exit(p2->p_lock);

	/*
	 * Preserve synchronization semantics of vfork.  If waiting for
	 * child to exec or exit, sleep until it clears LP_VFORKWAIT.
	 */
#if 0
	while (l1->l_pflag & LP_VFORKWAIT) {
		cv_wait(&l1->l_waitcv, proc_lock);
	}
#else
	while (p2->p_lflag & PL_PPWAIT)
		cv_wait(&p1->p_waitcv, proc_lock);
#endif

	/*
	 * Let the parent know that we are tracing its child.
	 */
	if (tracefork) {
		ksiginfo_t ksi;

		KSI_INIT_EMPTY(&ksi);
		ksi.ksi_signo = SIGTRAP;
		ksi.ksi_lid = l1->l_lid;
		kpsignal(p1, &ksi, NULL);
	}
	mutex_exit(proc_lock);

	return 0;
}
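The KNOTE(&p1->p_klist, NOTE_FORK | p2->p_pid) near the end is what feeds EVFILT_PROC watchers; the child's pid travels in the low bits of the hint and is used by the proc filter (for example when NOTE_TRACK is requested). A minimal userland sketch that watches a process for fork and exit events; the pid being watched is illustrative:

#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <stdio.h>
#include <err.h>

int
main(void)
{
	struct kevent ev;
	pid_t target = 1234;		/* illustrative pid to watch */
	int kq;

	if ((kq = kqueue()) < 0)
		err(1, "kqueue");

	/* Ask for fork and exit notifications on the target process. */
	EV_SET(&ev, target, EVFILT_PROC, EV_ADD, NOTE_FORK | NOTE_EXIT, 0, NULL);
	if (kevent(kq, &ev, 1, NULL, 0, NULL) < 0)
		err(1, "kevent register");

	if (kevent(kq, NULL, 0, &ev, 1, NULL) < 0)
		err(1, "kevent wait");

	if (ev.fflags & NOTE_FORK)
		printf("pid %ld forked\n", (long)ev.ident);
	if (ev.fflags & NOTE_EXIT)
		printf("pid %ld exited\n", (long)ev.ident);
	return 0;
}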
Example #23
int
fork1(struct proc *p1, int exitsig, int flags, void *stack, size_t stacksize,
    void (*func)(void *), void *arg, register_t *retval,
    struct proc **rnewprocp)
{
	struct proc *p2;
	uid_t uid;
	struct vmspace *vm;
	int count;
	vaddr_t uaddr;
	int s;
	extern void endtsleep(void *);
	extern void realitexpire(void *);

	/*
	 * Although process entries are dynamically created, we still keep
	 * a global limit on the maximum number we will create. We reserve
	 * the last 5 processes to root. The variable nprocs is the current
	 * number of processes, maxproc is the limit.
	 */
	uid = p1->p_cred->p_ruid;
	if ((nprocs >= maxproc - 5 && uid != 0) || nprocs >= maxproc) {
		static struct timeval lasttfm;

		if (ratecheck(&lasttfm, &fork_tfmrate))
			tablefull("proc");
		return (EAGAIN);
	}
	nprocs++;

	/*
	 * Increment the count of procs running with this uid. Don't allow
	 * a nonprivileged user to exceed their current limit.
	 */
	count = chgproccnt(uid, 1);
	if (uid != 0 && count > p1->p_rlimit[RLIMIT_NPROC].rlim_cur) {
		(void)chgproccnt(uid, -1);
		nprocs--;
		return (EAGAIN);
	}

	uaddr = uvm_km_alloc1(kernel_map, USPACE, USPACE_ALIGN, 1);
	if (uaddr == 0) {
		chgproccnt(uid, -1);
		nprocs--;
		return (ENOMEM);
	}

	/*
	 * From now on, we're committed to the fork and cannot fail.
	 */

	/* Allocate new proc. */
	p2 = pool_get(&proc_pool, PR_WAITOK);

	p2->p_stat = SIDL;			/* protect against others */
	p2->p_exitsig = exitsig;
	p2->p_forw = p2->p_back = NULL;

#ifdef RTHREADS
	if (flags & FORK_THREAD) {
		atomic_setbits_int(&p2->p_flag, P_THREAD);
		p2->p_p = p1->p_p;
		TAILQ_INSERT_TAIL(&p2->p_p->ps_threads, p2, p_thr_link);
	} else {
		process_new(p2, p1);
	}
#else
	process_new(p2, p1);
#endif

	/*
	 * Make a proc table entry for the new process.
	 * Start by zeroing the section of proc that is zero-initialized,
	 * then copy the section that is copied directly from the parent.
	 */
	bzero(&p2->p_startzero,
	    (unsigned) ((caddr_t)&p2->p_endzero - (caddr_t)&p2->p_startzero));
	bcopy(&p1->p_startcopy, &p2->p_startcopy,
	    (unsigned) ((caddr_t)&p2->p_endcopy - (caddr_t)&p2->p_startcopy));

	/*
	 * Initialize the timeouts.
	 */
	timeout_set(&p2->p_sleep_to, endtsleep, p2);
	timeout_set(&p2->p_realit_to, realitexpire, p2);

#if defined(__HAVE_CPUINFO)
	p2->p_cpu = p1->p_cpu;
#endif

	/*
	 * Duplicate sub-structures as needed.
	 * Increase reference counts on shared objects.
	 * The p_stats and p_sigacts substructs are set in vm_fork.
	 */
	p2->p_flag = 0;
	p2->p_emul = p1->p_emul;
	if (p1->p_flag & P_PROFIL)
		startprofclock(p2);
	atomic_setbits_int(&p2->p_flag, p1->p_flag & (P_SUGID | P_SUGIDEXEC));
	if (flags & FORK_PTRACE)
		atomic_setbits_int(&p2->p_flag, p1->p_flag & P_TRACED);
#ifdef RTHREADS
	if (flags & FORK_THREAD) {
		/* nothing */
	} else
#endif
	{
		p2->p_p->ps_cred = pool_get(&pcred_pool, PR_WAITOK);
		bcopy(p1->p_p->ps_cred, p2->p_p->ps_cred, sizeof(*p2->p_p->ps_cred));
		p2->p_p->ps_cred->p_refcnt = 1;
		crhold(p1->p_ucred);
	}

	TAILQ_INIT(&p2->p_selects);

	/* bump references to the text vnode (for procfs) */
	p2->p_textvp = p1->p_textvp;
	if (p2->p_textvp)
		VREF(p2->p_textvp);

	if (flags & FORK_CLEANFILES)
		p2->p_fd = fdinit(p1);
	else if (flags & FORK_SHAREFILES)
		p2->p_fd = fdshare(p1);
	else
		p2->p_fd = fdcopy(p1);

	/*
	 * If ps_limit is still copy-on-write, bump refcnt,
	 * otherwise get a copy that won't be modified.
	 * (If PL_SHAREMOD is clear, the structure is shared
	 * copy-on-write.)
	 */
#ifdef RTHREADS
	if (flags & FORK_THREAD) {
		/* nothing */
	} else
#endif
	{
		if (p1->p_p->ps_limit->p_lflags & PL_SHAREMOD)
			p2->p_p->ps_limit = limcopy(p1->p_p->ps_limit);
		else {
			p2->p_p->ps_limit = p1->p_p->ps_limit;
			p2->p_p->ps_limit->p_refcnt++;
		}
	}

	if (p1->p_session->s_ttyvp != NULL && p1->p_flag & P_CONTROLT)
		atomic_setbits_int(&p2->p_flag, P_CONTROLT);
	if (flags & FORK_PPWAIT)
		atomic_setbits_int(&p2->p_flag, P_PPWAIT);
	p2->p_pptr = p1;
	if (flags & FORK_NOZOMBIE)
		atomic_setbits_int(&p2->p_flag, P_NOZOMBIE);
	LIST_INIT(&p2->p_children);

#ifdef KTRACE
	/*
	 * Copy traceflag and tracefile if enabled.
	 * If not inherited, these were zeroed above.
	 */
	if (p1->p_traceflag & KTRFAC_INHERIT) {
		p2->p_traceflag = p1->p_traceflag;
		if ((p2->p_tracep = p1->p_tracep) != NULL)
			VREF(p2->p_tracep);
	}
#endif

	/*
	 * set priority of child to be that of parent
	 * XXX should move p_estcpu into the region of struct proc which gets
	 * copied.
	 */
	scheduler_fork_hook(p1, p2);

	/*
	 * Create signal actions for the child process.
	 */
	if (flags & FORK_SIGHAND)
		sigactsshare(p1, p2);
	else
		p2->p_sigacts = sigactsinit(p1);

	/*
	 * If emulation has process fork hook, call it now.
	 */
	if (p2->p_emul->e_proc_fork)
		(*p2->p_emul->e_proc_fork)(p2, p1);

	p2->p_addr = (struct user *)uaddr;

	/*
	 * Finish creating the child process.  It will return through a
	 * different path later.
	 */
	uvm_fork(p1, p2, ((flags & FORK_SHAREVM) ? TRUE : FALSE), stack,
	    stacksize, func ? func : child_return, arg ? arg : p2);

	timeout_set(&p2->p_stats->p_virt_to, virttimer_trampoline, p2);
	timeout_set(&p2->p_stats->p_prof_to, proftimer_trampoline, p2);

	vm = p2->p_vmspace;

	if (flags & FORK_FORK) {
		forkstat.cntfork++;
		forkstat.sizfork += vm->vm_dsize + vm->vm_ssize;
	} else if (flags & FORK_VFORK) {
		forkstat.cntvfork++;
		forkstat.sizvfork += vm->vm_dsize + vm->vm_ssize;
	} else if (flags & FORK_RFORK) {
		forkstat.cntrfork++;
		forkstat.sizrfork += vm->vm_dsize + vm->vm_ssize;
	} else {
		forkstat.cntkthread++;
		forkstat.sizkthread += vm->vm_dsize + vm->vm_ssize;
	}

	/* Find an unused pid satisfying 1 <= lastpid <= PID_MAX */
	do {
		lastpid = 1 + (randompid ? arc4random() : lastpid) % PID_MAX;
	} while (pidtaken(lastpid));
	p2->p_pid = lastpid;

	LIST_INSERT_HEAD(&allproc, p2, p_list);
	LIST_INSERT_HEAD(PIDHASH(p2->p_pid), p2, p_hash);
	LIST_INSERT_HEAD(&p1->p_children, p2, p_sibling);
	LIST_INSERT_AFTER(p1, p2, p_pglist);
	if (p2->p_flag & P_TRACED) {
		p2->p_oppid = p1->p_pid;
		if (p2->p_pptr != p1->p_pptr)
			proc_reparent(p2, p1->p_pptr);

		/*
		 * Set ptrace status.
		 */
		if (flags & FORK_FORK) {
			p2->p_ptstat = malloc(sizeof(*p2->p_ptstat),
			    M_SUBPROC, M_WAITOK);
			p1->p_ptstat->pe_report_event = PTRACE_FORK;
			p2->p_ptstat->pe_report_event = PTRACE_FORK;
			p1->p_ptstat->pe_other_pid = p2->p_pid;
			p2->p_ptstat->pe_other_pid = p1->p_pid;
		}
	}

#if NSYSTRACE > 0
	if (ISSET(p1->p_flag, P_SYSTRACE))
		systrace_fork(p1, p2);
#endif

	/*
	 * Make child runnable, set start time, and add to run queue.
	 */
	SCHED_LOCK(s);
 	getmicrotime(&p2->p_stats->p_start);
	p2->p_acflag = AFORK;
	p2->p_stat = SRUN;
	setrunqueue(p2);
	SCHED_UNLOCK(s);

	/*
	 * Notify any interested parties about the new process.
	 */
	KNOTE(&p1->p_klist, NOTE_FORK | p2->p_pid);

	/*
	 * Update stats now that we know the fork was successful.
	 */
	uvmexp.forks++;
	if (flags & FORK_PPWAIT)
		uvmexp.forks_ppwait++;
	if (flags & FORK_SHAREVM)
		uvmexp.forks_sharevm++;

	/*
	 * Pass a pointer to the new process to the caller.
	 */
	if (rnewprocp != NULL)
		*rnewprocp = p2;

	/*
	 * Preserve synchronization semantics of vfork.  If waiting for
	 * child to exec or exit, set P_PPWAIT on child, and sleep on our
	 * proc (in case of exit).
	 */
	if (flags & FORK_PPWAIT)
		while (p2->p_flag & P_PPWAIT)
			tsleep(p1, PWAIT, "ppwait", 0);

	/*
	 * If we're tracing the child, alert the parent too.
	 */
	if ((flags & FORK_PTRACE) && (p1->p_flag & P_TRACED))
		psignal(p1, SIGTRAP);

	/*
	 * Return child pid to parent process,
	 * marking us as parent via retval[1].
	 */
	if (retval != NULL) {
		retval[0] = p2->p_pid;
		retval[1] = 0;
	}
	return (0);
}
Example #24
static int 
bsd_accept(cyg_file *fp, cyg_file *new_fp,
           struct sockaddr *name, socklen_t *anamelen)
{
    socklen_t namelen = 0;
    int error = 0, s;
    struct socket *head, *so;
    struct sockaddr *sa;

    if( anamelen != NULL)
        namelen = *anamelen;

    s = splsoftnet();
    head = (struct socket *)fp->f_data;

    if ((head->so_options & SO_ACCEPTCONN) == 0) {
        splx(s);
        return (EINVAL);
    }

    if ((head->so_state & SS_NBIO) && TAILQ_EMPTY(&head->so_comp)) {
        splx(s);
        return (EWOULDBLOCK);
    }

    while (TAILQ_EMPTY(&head->so_comp) && head->so_error == 0) {
        if (head->so_state & SS_CANTRCVMORE) {
            head->so_error = ECONNABORTED;
            break;
        }
        error = tsleep((caddr_t)&head->so_timeo, PSOCK | PCATCH,
                       "netcon", 0);
        if (error) {
            splx(s);
            return (error);
        }
    }

    if (head->so_error) {
        error = head->so_error;
        head->so_error = 0;
        splx(s);
        return (error);
    }

    /*
     * At this point we know that there is at least one connection
     * ready to be accepted. Remove it from the queue prior to
     * allocating the file descriptor for it since falloc() may
     * block allowing another process to accept the connection
     * instead.
     */
    so = TAILQ_FIRST(&head->so_comp);
    TAILQ_REMOVE(&head->so_comp, so, so_list);
    head->so_qlen--;

#if 0 // FIXME
    fflag = lfp->f_flag;
    error = falloc(p, &nfp, &fd);
    if (error) {
        /*
         * Probably ran out of file descriptors. Put the
         * unaccepted connection back onto the queue and
         * do another wakeup so some other process might
         * have a chance at it.
         */
        TAILQ_INSERT_HEAD(&head->so_comp, so, so_list);
        head->so_qlen++;
        wakeup_one(&head->so_timeo);
        splx(s);
        goto done;
    }
    fhold(nfp);
    p->p_retval[0] = fd;

    /* connection has been removed from the listen queue */
    KNOTE(&head->so_rcv.sb_sel.si_note, 0);
#endif

    so->so_state &= ~SS_COMP;
    so->so_head = NULL;

    cyg_selinit(&so->so_rcv.sb_sel);
    cyg_selinit(&so->so_snd.sb_sel);
    
    new_fp->f_type      = DTYPE_SOCKET;
    new_fp->f_flag     |= FREAD|FWRITE;
    new_fp->f_offset    = 0;
    new_fp->f_ops       = &bsd_sock_fileops;
    new_fp->f_data      = (CYG_ADDRWORD)so;
    new_fp->f_xops      = (CYG_ADDRWORD)&bsd_sockops;
    
    sa = 0;
    error = soaccept(so, &sa);
    if (error) {
        /*
         * return a namelen of zero for older code which might
         * ignore the return value from accept.
         */	
        if (name != NULL) {
            *anamelen = 0;
        }
        goto noconnection;
    }
    if (sa == NULL) {
        namelen = 0;
        if (name)
            goto gotnoname;
        splx(s);
        error = 0;
        goto done;
    }
    if (name) {
        if (namelen > sa->sa_len)
            namelen = sa->sa_len;
#ifdef COMPAT_OLDSOCK
        if (compat)
            ((struct osockaddr *)sa)->sa_family = sa->sa_family;
#endif
        error = copyout(sa, (caddr_t)name, namelen);
        if (!error)
gotnoname:
        *anamelen = namelen;
    }
noconnection:

#if 0 // FIXME
	/*
	 * close the new descriptor, assuming someone hasn't ripped it
	 * out from under us.
	 */
	if (error) {
		if (fdp->fd_ofiles[fd] == nfp) {
			fdp->fd_ofiles[fd] = NULL;
			fdrop(nfp, p);
		}
	}
	splx(s);

	/*
	 * Release explicitly held references before returning.
	 */
done:
	if (nfp != NULL)
		fdrop(nfp, p);
	fdrop(lfp, p);
	return (error);
    m_freem(nam);
#else
 done:
#endif
    splx(s);
    
    return (error);
}
Example #25
/* ARGSUSED */
int
sys_execve(struct proc *p, void *v, register_t *retval)
{
	struct sys_execve_args /* {
		syscallarg(const char *) path;
		syscallarg(char *const *) argp;
		syscallarg(char *const *) envp;
	} */ *uap = v;
	int error;
	struct exec_package pack;
	struct nameidata nid;
	struct vattr attr;
	struct ucred *cred = p->p_ucred;
	char *argp;
	char * const *cpp, *dp, *sp;
#ifdef KTRACE
	char *env_start;
#endif
	struct process *pr = p->p_p;
	long argc, envc;
	size_t len, sgap;
#ifdef MACHINE_STACK_GROWS_UP
	size_t slen;
#endif
	char *stack;
	struct ps_strings arginfo;
	struct vmspace *vm = pr->ps_vmspace;
	char **tmpfap;
	extern struct emul emul_native;
#if NSYSTRACE > 0
	int wassugid = ISSET(pr->ps_flags, PS_SUGID | PS_SUGIDEXEC);
	size_t pathbuflen;
#endif
	char *pathbuf = NULL;
	struct vnode *otvp;

	/* get other threads to stop */
	if ((error = single_thread_set(p, SINGLE_UNWIND, 1)))
		return (error);

	/*
	 * Cheap solution to complicated problems.
	 * Mark this process as "leave me alone, I'm execing".
	 */
	atomic_setbits_int(&pr->ps_flags, PS_INEXEC);

#if NSYSTRACE > 0
	if (ISSET(p->p_flag, P_SYSTRACE)) {
		systrace_execve0(p);
		pathbuf = pool_get(&namei_pool, PR_WAITOK);
		error = copyinstr(SCARG(uap, path), pathbuf, MAXPATHLEN,
		    &pathbuflen);
		if (error != 0)
			goto clrflag;
	}
#endif
	if (pathbuf != NULL) {
		NDINIT(&nid, LOOKUP, NOFOLLOW, UIO_SYSSPACE, pathbuf, p);
	} else {
		NDINIT(&nid, LOOKUP, NOFOLLOW, UIO_USERSPACE,
		    SCARG(uap, path), p);
	}

	/*
	 * initialize the fields of the exec package.
	 */
	if (pathbuf != NULL)
		pack.ep_name = pathbuf;
	else
		pack.ep_name = (char *)SCARG(uap, path);
	pack.ep_hdr = malloc(exec_maxhdrsz, M_EXEC, M_WAITOK);
	pack.ep_hdrlen = exec_maxhdrsz;
	pack.ep_hdrvalid = 0;
	pack.ep_ndp = &nid;
	pack.ep_interp = NULL;
	pack.ep_emul_arg = NULL;
	VMCMDSET_INIT(&pack.ep_vmcmds);
	pack.ep_vap = &attr;
	pack.ep_emul = &emul_native;
	pack.ep_flags = 0;

	/* see if we can run it. */
	if ((error = check_exec(p, &pack)) != 0) {
		goto freehdr;
	}

	/* XXX -- THE FOLLOWING SECTION NEEDS MAJOR CLEANUP */

	/* allocate an argument buffer */
	argp = km_alloc(NCARGS, &kv_exec, &kp_pageable, &kd_waitok);
#ifdef DIAGNOSTIC
	if (argp == NULL)
		panic("execve: argp == NULL");
#endif
	dp = argp;
	argc = 0;

	/* copy the fake args list, if there's one, freeing it as we go */
	if (pack.ep_flags & EXEC_HASARGL) {
		tmpfap = pack.ep_fa;
		while (*tmpfap != NULL) {
			char *cp;

			cp = *tmpfap;
			while (*cp)
				*dp++ = *cp++;
			*dp++ = '\0';

			free(*tmpfap, M_EXEC, 0);
			tmpfap++; argc++;
		}
		free(pack.ep_fa, M_EXEC, 0);
		pack.ep_flags &= ~EXEC_HASARGL;
	}

	/* Now get argv & environment */
	if (!(cpp = SCARG(uap, argp))) {
		error = EFAULT;
		goto bad;
	}

	if (pack.ep_flags & EXEC_SKIPARG)
		cpp++;

	while (1) {
		len = argp + ARG_MAX - dp;
		if ((error = copyin(cpp, &sp, sizeof(sp))) != 0)
			goto bad;
		if (!sp)
			break;
		if ((error = copyinstr(sp, dp, len, &len)) != 0) {
			if (error == ENAMETOOLONG)
				error = E2BIG;
			goto bad;
		}
		dp += len;
		cpp++;
		argc++;
	}

	/* must have at least one argument */
	if (argc == 0) {
		error = EINVAL;
		goto bad;
	}

#ifdef KTRACE
	if (KTRPOINT(p, KTR_EXECARGS))
		ktrexec(p, KTR_EXECARGS, argp, dp - argp);
#endif

	envc = 0;
	/* environment does not need to be there */
	if ((cpp = SCARG(uap, envp)) != NULL ) {
#ifdef KTRACE
		env_start = dp;
#endif
		while (1) {
			len = argp + ARG_MAX - dp;
			if ((error = copyin(cpp, &sp, sizeof(sp))) != 0)
				goto bad;
			if (!sp)
				break;
			if ((error = copyinstr(sp, dp, len, &len)) != 0) {
				if (error == ENAMETOOLONG)
					error = E2BIG;
				goto bad;
			}
			dp += len;
			cpp++;
			envc++;
		}

#ifdef KTRACE
		if (KTRPOINT(p, KTR_EXECENV))
			ktrexec(p, KTR_EXECENV, env_start, dp - env_start);
#endif
	}

	dp = (char *)(((long)dp + _STACKALIGNBYTES) & ~_STACKALIGNBYTES);

	sgap = STACKGAPLEN;

	/*
	 * If we have enabled random stackgap, the stack itself has already
	 * been moved from a random location, but is still aligned to a page
	 * boundary.  Provide the lower bits of random placement now.
	 */
	if (stackgap_random != 0) {
		sgap += arc4random() & PAGE_MASK;
		sgap = (sgap + _STACKALIGNBYTES) & ~_STACKALIGNBYTES;
	}

	/* Now check if args & environ fit into new stack */
	len = ((argc + envc + 2 + pack.ep_emul->e_arglen) * sizeof(char *) +
	    sizeof(long) + dp + sgap + sizeof(struct ps_strings)) - argp;

	len = (len + _STACKALIGNBYTES) &~ _STACKALIGNBYTES;

	if (len > pack.ep_ssize) { /* in effect, compare to initial limit */
		error = ENOMEM;
		goto bad;
	}

	/* adjust "active stack depth" for process VSZ */
	pack.ep_ssize = len;	/* maybe should go elsewhere, but... */

	/*
	 * we're committed: any further errors will kill the process, so
	 * kill the other threads now.
	 */
	single_thread_set(p, SINGLE_EXIT, 0);

	/*
	 * Prepare vmspace for remapping. Note that uvmspace_exec can replace
	 * pr_vmspace!
	 */
	uvmspace_exec(p, VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS);

	vm = pr->ps_vmspace;
	/* Now map address space */
	vm->vm_taddr = (char *)trunc_page(pack.ep_taddr);
	vm->vm_tsize = atop(round_page(pack.ep_taddr + pack.ep_tsize) -
	    trunc_page(pack.ep_taddr));
	vm->vm_daddr = (char *)trunc_page(pack.ep_daddr);
	vm->vm_dsize = atop(round_page(pack.ep_daddr + pack.ep_dsize) -
	    trunc_page(pack.ep_daddr));
	vm->vm_dused = 0;
	vm->vm_ssize = atop(round_page(pack.ep_ssize));
	vm->vm_maxsaddr = (char *)pack.ep_maxsaddr;
	vm->vm_minsaddr = (char *)pack.ep_minsaddr;

	/* create the new process's VM space by running the vmcmds */
#ifdef DIAGNOSTIC
	if (pack.ep_vmcmds.evs_used == 0)
		panic("execve: no vmcmds");
#endif
	error = exec_process_vmcmds(p, &pack);

	/* if an error happened, deallocate and punt */
	if (error)
		goto exec_abort;

	/* old "stackgap" is gone now */
	pr->ps_stackgap = 0;

#ifdef MACHINE_STACK_GROWS_UP
	pr->ps_strings = (vaddr_t)vm->vm_maxsaddr + sgap;
        if (uvm_map_protect(&vm->vm_map, (vaddr_t)vm->vm_maxsaddr,
            trunc_page(pr->ps_strings), PROT_NONE, TRUE))
                goto exec_abort;
#else
	pr->ps_strings = (vaddr_t)vm->vm_minsaddr - sizeof(arginfo) - sgap;
        if (uvm_map_protect(&vm->vm_map,
            round_page(pr->ps_strings + sizeof(arginfo)),
            (vaddr_t)vm->vm_minsaddr, PROT_NONE, TRUE))
                goto exec_abort;
#endif

	/* remember information about the process */
	arginfo.ps_nargvstr = argc;
	arginfo.ps_nenvstr = envc;

#ifdef MACHINE_STACK_GROWS_UP
	stack = (char *)vm->vm_maxsaddr + sizeof(arginfo) + sgap;
	slen = len - sizeof(arginfo) - sgap;
#else
	stack = (char *)(vm->vm_minsaddr - len);
#endif
	/* Now copy argc, args & environ to new stack */
	if (!(*pack.ep_emul->e_copyargs)(&pack, &arginfo, stack, argp))
		goto exec_abort;

	/* copy out the process's ps_strings structure */
	if (copyout(&arginfo, (char *)pr->ps_strings, sizeof(arginfo)))
		goto exec_abort;

	stopprofclock(pr);	/* stop profiling */
	fdcloseexec(p);		/* handle close on exec */
	execsigs(p);		/* reset caught signals */
	TCB_SET(p, NULL);	/* reset the TCB address */
	pr->ps_kbind_addr = 0;	/* reset the kbind bits */
	pr->ps_kbind_cookie = 0;

	/* set command name & other accounting info */
	memset(p->p_comm, 0, sizeof(p->p_comm));
	len = min(nid.ni_cnd.cn_namelen, MAXCOMLEN);
	memcpy(p->p_comm, nid.ni_cnd.cn_nameptr, len);
	pr->ps_acflag &= ~AFORK;

	/* record proc's vnode, for use by sysctl */
	otvp = pr->ps_textvp;
	vref(pack.ep_vp);
	pr->ps_textvp = pack.ep_vp;
	if (otvp)
		vrele(otvp);

	atomic_setbits_int(&pr->ps_flags, PS_EXEC);
	if (pr->ps_flags & PS_PPWAIT) {
		atomic_clearbits_int(&pr->ps_flags, PS_PPWAIT);
		atomic_clearbits_int(&pr->ps_pptr->ps_flags, PS_ISPWAIT);
		wakeup(pr->ps_pptr);
	}

	/*
	 * If process does execve() while it has a mismatched real,
	 * effective, or saved uid/gid, we set PS_SUGIDEXEC.
	 */
	if (cred->cr_uid != cred->cr_ruid ||
	    cred->cr_uid != cred->cr_svuid ||
	    cred->cr_gid != cred->cr_rgid ||
	    cred->cr_gid != cred->cr_svgid)
		atomic_setbits_int(&pr->ps_flags, PS_SUGIDEXEC);
	else
		atomic_clearbits_int(&pr->ps_flags, PS_SUGIDEXEC);

	atomic_clearbits_int(&pr->ps_flags, PS_TAMED);
	tame_dropwpaths(pr);

	/*
	 * deal with set[ug]id.
	 * MNT_NOEXEC has already been used to disable s[ug]id.
	 */
	if ((attr.va_mode & (VSUID | VSGID)) && proc_cansugid(p)) {
		int i;

		atomic_setbits_int(&pr->ps_flags, PS_SUGID|PS_SUGIDEXEC);

#ifdef KTRACE
		/*
		 * If process is being ktraced, turn off - unless
		 * root set it.
		 */
		if (pr->ps_tracevp && !(pr->ps_traceflag & KTRFAC_ROOT))
			ktrcleartrace(pr);
#endif
		p->p_ucred = cred = crcopy(cred);
		if (attr.va_mode & VSUID)
			cred->cr_uid = attr.va_uid;
		if (attr.va_mode & VSGID)
			cred->cr_gid = attr.va_gid;

		/*
		 * For set[ug]id processes, a few caveats apply to
		 * stdin, stdout, and stderr.
		 */
		error = 0;
		fdplock(p->p_fd);
		for (i = 0; i < 3; i++) {
			struct file *fp = NULL;

			/*
			 * NOTE - This will never return NULL because of
			 * immature fds. The file descriptor table is not
			 * shared because we're suid.
			 */
			fp = fd_getfile(p->p_fd, i);

			/*
			 * Ensure that stdin, stdout, and stderr are already
			 * allocated.  We do not want userland to accidentally
			 * allocate descriptors in this range which has implied
			 * meaning to libc.
			 */
			if (fp == NULL) {
				short flags = FREAD | (i == 0 ? 0 : FWRITE);
				struct vnode *vp;
				int indx;

				if ((error = falloc(p, &fp, &indx)) != 0)
					break;
#ifdef DIAGNOSTIC
				if (indx != i)
					panic("sys_execve: falloc indx != i");
#endif
				if ((error = cdevvp(getnulldev(), &vp)) != 0) {
					fdremove(p->p_fd, indx);
					closef(fp, p);
					break;
				}
				if ((error = VOP_OPEN(vp, flags, cred, p)) != 0) {
					fdremove(p->p_fd, indx);
					closef(fp, p);
					vrele(vp);
					break;
				}
				if (flags & FWRITE)
					vp->v_writecount++;
				fp->f_flag = flags;
				fp->f_type = DTYPE_VNODE;
				fp->f_ops = &vnops;
				fp->f_data = (caddr_t)vp;
				FILE_SET_MATURE(fp, p);
			}
		}
		fdpunlock(p->p_fd);
		if (error)
			goto exec_abort;
	} else
		atomic_clearbits_int(&pr->ps_flags, PS_SUGID);

	/*
	 * Reset the saved ugids and update the process's copy of the
	 * creds if the creds have been changed
	 */
	if (cred->cr_uid != cred->cr_svuid ||
	    cred->cr_gid != cred->cr_svgid) {
		/* make sure we have unshared ucreds */
		p->p_ucred = cred = crcopy(cred);
		cred->cr_svuid = cred->cr_uid;
		cred->cr_svgid = cred->cr_gid;
	}

	if (pr->ps_ucred != cred) {
		struct ucred *ocred;

		ocred = pr->ps_ucred;
		crhold(cred);
		pr->ps_ucred = cred;
		crfree(ocred);
	}

	if (pr->ps_flags & PS_SUGIDEXEC) {
		int i, s = splclock();

		timeout_del(&pr->ps_realit_to);
		for (i = 0; i < nitems(pr->ps_timer); i++) {
			timerclear(&pr->ps_timer[i].it_interval);
			timerclear(&pr->ps_timer[i].it_value);
		}
		splx(s);
	}

	/* reset CPU time usage for the thread, but not the process */
	timespecclear(&p->p_tu.tu_runtime);
	p->p_tu.tu_uticks = p->p_tu.tu_sticks = p->p_tu.tu_iticks = 0;

	km_free(argp, NCARGS, &kv_exec, &kp_pageable);

	pool_put(&namei_pool, nid.ni_cnd.cn_pnbuf);
	vn_close(pack.ep_vp, FREAD, cred, p);

	/*
	 * notify others that we exec'd
	 */
	KNOTE(&pr->ps_klist, NOTE_EXEC);

	/* setup new registers and do misc. setup. */
	if (pack.ep_emul->e_fixup != NULL) {
		if ((*pack.ep_emul->e_fixup)(p, &pack) != 0)
			goto free_pack_abort;
	}
#ifdef MACHINE_STACK_GROWS_UP
	(*pack.ep_emul->e_setregs)(p, &pack, (u_long)stack + slen, retval);
#else
	(*pack.ep_emul->e_setregs)(p, &pack, (u_long)stack, retval);
#endif

	/* map the process's signal trampoline code */
	if (exec_sigcode_map(pr, pack.ep_emul))
		goto free_pack_abort;

#ifdef __HAVE_EXEC_MD_MAP
	/* perform md specific mappings that process might need */
	if (exec_md_map(p, &pack))
		goto free_pack_abort;
#endif

	if (pr->ps_flags & PS_TRACED)
		psignal(p, SIGTRAP);

	free(pack.ep_hdr, M_EXEC, pack.ep_hdrlen);

	/*
	 * Call emulation specific exec hook. This can setup per-process
	 * p->p_emuldata or do any other per-process stuff an emulation needs.
	 *
	 * If we are executing process of different emulation than the
	 * original forked process, call e_proc_exit() of the old emulation
	 * first, then e_proc_exec() of new emulation. If the emulation is
	 * same, the exec hook code should deallocate any old emulation
	 * resources held previously by this process.
	 */
	if (pr->ps_emul && pr->ps_emul->e_proc_exit &&
	    pr->ps_emul != pack.ep_emul)
		(*pr->ps_emul->e_proc_exit)(p);

	p->p_descfd = 255;
	if ((pack.ep_flags & EXEC_HASFD) && pack.ep_fd < 255)
		p->p_descfd = pack.ep_fd;

	/*
	 * Call exec hook. Emulation code may NOT store reference to anything
	 * from &pack.
	 */
	if (pack.ep_emul->e_proc_exec)
		(*pack.ep_emul->e_proc_exec)(p, &pack);

#if defined(KTRACE) && defined(COMPAT_LINUX)
	/* update ps_emul, but don't ktrace it if native-execing-native */
	if (pr->ps_emul != pack.ep_emul || pack.ep_emul != &emul_native) {
		pr->ps_emul = pack.ep_emul;

		if (KTRPOINT(p, KTR_EMUL))
			ktremul(p);
	}
#else
	/* update ps_emul, the old value is no longer needed */
	pr->ps_emul = pack.ep_emul;
#endif

	atomic_clearbits_int(&pr->ps_flags, PS_INEXEC);
	single_thread_clear(p, P_SUSPSIG);

#if NSYSTRACE > 0
	if (ISSET(p->p_flag, P_SYSTRACE) &&
	    wassugid && !ISSET(pr->ps_flags, PS_SUGID | PS_SUGIDEXEC))
		systrace_execve1(pathbuf, p);
#endif

	if (pathbuf != NULL)
		pool_put(&namei_pool, pathbuf);

	return (0);

bad:
	/* free the vmspace-creation commands, and release their references */
	kill_vmcmds(&pack.ep_vmcmds);
	/* kill any opened file descriptor, if necessary */
	if (pack.ep_flags & EXEC_HASFD) {
		pack.ep_flags &= ~EXEC_HASFD;
		fdplock(p->p_fd);
		(void) fdrelease(p, pack.ep_fd);
		fdpunlock(p->p_fd);
	}
	if (pack.ep_interp != NULL)
		pool_put(&namei_pool, pack.ep_interp);
	if (pack.ep_emul_arg != NULL)
		free(pack.ep_emul_arg, M_TEMP, pack.ep_emul_argsize);
	/* close and put the exec'd file */
	vn_close(pack.ep_vp, FREAD, cred, p);
	pool_put(&namei_pool, nid.ni_cnd.cn_pnbuf);
	km_free(argp, NCARGS, &kv_exec, &kp_pageable);

 freehdr:
	free(pack.ep_hdr, M_EXEC, pack.ep_hdrlen);
#if NSYSTRACE > 0
 clrflag:
#endif
	atomic_clearbits_int(&pr->ps_flags, PS_INEXEC);
	single_thread_clear(p, P_SUSPSIG);

	if (pathbuf != NULL)
		pool_put(&namei_pool, pathbuf);

	return (error);

exec_abort:
	/*
	 * the old process doesn't exist anymore.  exit gracefully.
	 * get rid of the (new) address space we have created, if any, get rid
	 * of our namei data and vnode, and exit noting failure
	 */
	uvm_deallocate(&vm->vm_map, VM_MIN_ADDRESS,
		VM_MAXUSER_ADDRESS - VM_MIN_ADDRESS);
	if (pack.ep_interp != NULL)
		pool_put(&namei_pool, pack.ep_interp);
	if (pack.ep_emul_arg != NULL)
		free(pack.ep_emul_arg, M_TEMP, pack.ep_emul_argsize);
	pool_put(&namei_pool, nid.ni_cnd.cn_pnbuf);
	vn_close(pack.ep_vp, FREAD, cred, p);
	km_free(argp, NCARGS, &kv_exec, &kp_pageable);

free_pack_abort:
	free(pack.ep_hdr, M_EXEC, pack.ep_hdrlen);
	if (pathbuf != NULL)
		pool_put(&namei_pool, pathbuf);
	exit1(p, W_EXITCODE(0, SIGABRT), EXIT_NORMAL);

	/* NOTREACHED */
	atomic_clearbits_int(&pr->ps_flags, PS_INEXEC);

	return (0);
}
Example #26
/*
 * NOTE: Must be called with tty_token held
 */
static int
snp_in(struct snoop *snp, char *buf, int n)
{
	int s_free, s_tail;
	int len, nblen;
	caddr_t from, to;
	char *nbuf;

	ASSERT_LWKT_TOKEN_HELD(&tty_token);
	KASSERT(n >= 0, ("negative snoop char count"));

	if (n == 0)
		return (0);

	if (snp->snp_flags & SNOOP_DOWN) {
		kprintf("Snoop: more data to down interface.\n");
		return (0);
	}

	if (snp->snp_flags & SNOOP_OFLOW) {
		kprintf("Snoop: buffer overflow.\n");
		/*
		 * On overflow we just repeat the standard close
		 * procedure... yes, this is a waste of space, but the next
		 * read from the device will then fail if one recalls that
		 * he is snooping and retries...
		 */

		return (snp_down(snp));
	}
	s_tail = snp->snp_blen - (snp->snp_len + snp->snp_base);
	s_free = snp->snp_blen - snp->snp_len;


	if (n > s_free) {
		crit_enter();
		nblen = snp->snp_blen;
		while ((n > s_free) && ((nblen * 2) <= SNOOP_MAXLEN)) {
			nblen = snp->snp_blen * 2;
			s_free = nblen - (snp->snp_len + snp->snp_base);
		}
		if ((n <= s_free) && (nbuf = kmalloc(nblen, M_SNP, M_NOWAIT))) {
			bcopy(snp->snp_buf + snp->snp_base, nbuf, snp->snp_len);
			kfree(snp->snp_buf, M_SNP);
			snp->snp_buf = nbuf;
			snp->snp_blen = nblen;
			snp->snp_base = 0;
		} else {
			snp->snp_flags |= SNOOP_OFLOW;
			if (snp->snp_flags & SNOOP_RWAIT) {
				snp->snp_flags &= ~SNOOP_RWAIT;
				wakeup((caddr_t)snp);
			}
			crit_exit();
			return (0);
		}
		crit_exit();
	}
	if (n > s_tail) {
		from = (caddr_t)(snp->snp_buf + snp->snp_base);
		to = (caddr_t)(snp->snp_buf);
		len = snp->snp_len;
		bcopy(from, to, len);
		snp->snp_base = 0;
	}
	to = (caddr_t)(snp->snp_buf + snp->snp_base + snp->snp_len);
	bcopy(buf, to, n);
	snp->snp_len += n;

	if (snp->snp_flags & SNOOP_RWAIT) {
		snp->snp_flags &= ~SNOOP_RWAIT;
		wakeup((caddr_t)snp);
	}
	KNOTE(&snp->snp_kq.ki_note, 0);

	return (n);
}
Example #27
int
do_sys_accept(struct lwp *l, int sock, struct mbuf **name,
    register_t *new_sock, const sigset_t *mask, int flags, int clrflags)
{
	file_t		*fp, *fp2;
	struct mbuf	*nam;
	int		error, fd;
	struct socket	*so, *so2;
	short		wakeup_state = 0;

	if ((fp = fd_getfile(sock)) == NULL)
		return EBADF;
	if (fp->f_type != DTYPE_SOCKET) {
		fd_putfile(sock);
		return ENOTSOCK;
	}
	if ((error = fd_allocfile(&fp2, &fd)) != 0) {
		fd_putfile(sock);
		return error;
	}
	nam = m_get(M_WAIT, MT_SONAME);
	*new_sock = fd;
	so = fp->f_socket;
	solock(so);

	if (__predict_false(mask))
		sigsuspendsetup(l, mask);

	if (!(so->so_proto->pr_flags & PR_LISTEN)) {
		error = EOPNOTSUPP;
		goto bad;
	}
	if ((so->so_options & SO_ACCEPTCONN) == 0) {
		error = EINVAL;
		goto bad;
	}
	if ((so->so_state & SS_NBIO) && so->so_qlen == 0) {
		error = EWOULDBLOCK;
		goto bad;
	}
	while (so->so_qlen == 0 && so->so_error == 0) {
		if (so->so_state & SS_CANTRCVMORE) {
			so->so_error = ECONNABORTED;
			break;
		}
		if (wakeup_state & SS_RESTARTSYS) {
			error = ERESTART;
			goto bad;
		}
		error = sowait(so, true, 0);
		if (error) {
			goto bad;
		}
		wakeup_state = so->so_state;
	}
	if (so->so_error) {
		error = so->so_error;
		so->so_error = 0;
		goto bad;
	}
	/* connection has been removed from the listen queue */
	KNOTE(&so->so_rcv.sb_sel.sel_klist, NOTE_SUBMIT);
	so2 = TAILQ_FIRST(&so->so_q);
	if (soqremque(so2, 1) == 0)
		panic("accept");
	fp2->f_type = DTYPE_SOCKET;
	fp2->f_flag = (fp->f_flag & ~clrflags) |
	    ((flags & SOCK_NONBLOCK) ? FNONBLOCK : 0)|
	    ((flags & SOCK_NOSIGPIPE) ? FNOSIGPIPE : 0);
	fp2->f_ops = &socketops;
	fp2->f_socket = so2;
	if (fp2->f_flag & FNONBLOCK)
		so2->so_state |= SS_NBIO;
	else
		so2->so_state &= ~SS_NBIO;
	error = soaccept(so2, nam);
	so2->so_cred = kauth_cred_dup(so->so_cred);
	sounlock(so);
	if (error) {
		/* an error occurred, free the file descriptor and mbuf */
		m_freem(nam);
		mutex_enter(&fp2->f_lock);
		fp2->f_count++;
		mutex_exit(&fp2->f_lock);
		closef(fp2);
		fd_abort(curproc, NULL, fd);
	} else {
		fd_set_exclose(l, fd, (flags & SOCK_CLOEXEC) != 0);
		fd_affix(curproc, fp2, fd);
		*name = nam;
	}
	fd_putfile(sock);
	if (__predict_false(mask))
		sigsuspendteardown(l);
	return error;
 bad:
	sounlock(so);
	m_freem(nam);
	fd_putfile(sock);
	fd_abort(curproc, fp2, fd);
	if (__predict_false(mask))
		sigsuspendteardown(l);
	return error;
}