Example #1
/**
 * radeon_fence_wait_any_seq - wait for a sequence number on any ring
 *
 * @rdev: radeon device pointer
 * @target_seq: sequence number(s) we want to wait for
 * @intr: use interruptible sleep
 *
 * Wait for the requested sequence number(s) to be written by any ring
 * (all asics).  Sequence number array is indexed by ring id.
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the sequence number.  Helper function
 * for radeon_fence_wait_any(), et al.
 * Returns 0 if the sequence number has passed, error for all other cases.
 */
static int radeon_fence_wait_any_seq(struct radeon_device *rdev,
                                     u64 *target_seq, bool intr)
{
    unsigned long timeout, last_activity, tmp;
    unsigned i, ring = RADEON_NUM_RINGS;
    bool signaled, fence_queue_locked;
    int r;

    for (i = 0, last_activity = 0; i < RADEON_NUM_RINGS; ++i) {
        if (!target_seq[i]) {
            continue;
        }

        /* use the most recent one as indicator */
        if (time_after(rdev->fence_drv[i].last_activity, last_activity)) {
            last_activity = rdev->fence_drv[i].last_activity;
        }

        /* For lockup detection just pick the lowest ring we are
         * actively waiting for
         */
        if (i < ring) {
            ring = i;
        }
    }

    /* nothing to wait for? */
    if (ring == RADEON_NUM_RINGS) {
        return -ENOENT;
    }

    while (!radeon_fence_any_seq_signaled(rdev, target_seq)) {
        timeout = jiffies - RADEON_FENCE_JIFFIES_TIMEOUT;
        if (time_after(last_activity, timeout)) {
            /* the normal case, timeout is somewhere before last_activity */
            timeout = last_activity - timeout;
        } else {
            /* either jiffies wrapped around, or no fence was signaled
             * in the last 500ms; either way, just wait for the minimum
             * amount and then check for a lockup
             */
            timeout = 1;
        }

        CTR2(KTR_DRM, "radeon fence: wait begin (ring=%d, target_seq=%d)",
             ring, target_seq[ring]);
        for (i = 0; i < RADEON_NUM_RINGS; ++i) {
            if (target_seq[i]) {
                radeon_irq_kms_sw_irq_get(rdev, i);
            }
        }
        fence_queue_locked = false;
        r = 0;
        while (!(signaled = radeon_fence_any_seq_signaled(rdev,
                            target_seq))) {
            if (!fence_queue_locked) {
                mtx_lock(&rdev->fence_queue_mtx);
                fence_queue_locked = true;
            }
            if (intr) {
                r = cv_timedwait_sig(&rdev->fence_queue,
                                     &rdev->fence_queue_mtx,
                                     timeout);
            } else {
                r = cv_timedwait(&rdev->fence_queue,
                                 &rdev->fence_queue_mtx,
                                 timeout);
            }
            if (r == EINTR)
                r = ERESTARTSYS;
            if (r != 0) {
                if (r == EWOULDBLOCK) {
                    signaled =
                        radeon_fence_any_seq_signaled(
                            rdev, target_seq);
                }
                break;
            }
        }
        if (fence_queue_locked) {
            mtx_unlock(&rdev->fence_queue_mtx);
        }
        for (i = 0; i < RADEON_NUM_RINGS; ++i) {
            if (target_seq[i]) {
                radeon_irq_kms_sw_irq_put(rdev, i);
            }
        }
        if (unlikely(r == ERESTARTSYS)) {
            return -r;
        }
        CTR2(KTR_DRM, "radeon fence: wait end (ring=%d, target_seq=%d)",
             ring, target_seq[ring]);

        if (unlikely(!signaled)) {
#ifndef __FreeBSD__
            /* we were interrupted for some reason and fence
             * isn't signaled yet, resume waiting */
            if (r) {
                continue;
            }
#endif

            sx_xlock(&rdev->ring_lock);
            for (i = 0, tmp = 0; i < RADEON_NUM_RINGS; ++i) {
                if (time_after(rdev->fence_drv[i].last_activity, tmp)) {
                    tmp = rdev->fence_drv[i].last_activity;
                }
            }
            /* test if somebody else has already decided that this is a lockup */
            if (last_activity != tmp) {
                last_activity = tmp;
                sx_xunlock(&rdev->ring_lock);
                continue;
            }

            if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
                /* good news we believe it's a lockup */
                dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016jx)\n",
                         (uintmax_t)target_seq[ring]);

                /* change last activity so nobody else thinks there is a lockup */
                for (i = 0; i < RADEON_NUM_RINGS; ++i) {
                    rdev->fence_drv[i].last_activity = jiffies;
                }

                /* mark the ring as not ready any more */
                rdev->ring[ring].ready = false;
                sx_xunlock(&rdev->ring_lock);
                return -EDEADLK;
            }
            sx_xunlock(&rdev->ring_lock);
        }
    }
    return 0;
}
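
A caller is expected to build a per-ring sequence array before invoking this helper. Below is a minimal caller sketch (hypothetical, for illustration only; the real radeon_fence_wait_any() performs roughly this setup), assuming the driver's struct radeon_fence with its ring and seq fields:

/* Hypothetical caller: wait until either of two fences signals. */
static int
wait_for_either_fence(struct radeon_device *rdev,
                      struct radeon_fence *a, struct radeon_fence *b)
{
    u64 target_seq[RADEON_NUM_RINGS];
    unsigned i;

    for (i = 0; i < RADEON_NUM_RINGS; ++i) {
        target_seq[i] = 0;    /* 0 = ring not waited on */
    }
    target_seq[a->ring] = a->seq;
    target_seq[b->ring] = b->seq;

    /* interruptible sleep; returns 0 once any requested seq has passed */
    return radeon_fence_wait_any_seq(rdev, target_seq, true);
}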
Example #2
static int
procfs_control(struct thread *td, struct proc *p, int op)
{
	int error = 0;
	struct thread *temp;

	/*
	 * Attach - attaches the target process for debugging
	 * by the calling process.
	 */
	if (op == PROCFS_CTL_ATTACH) {
		sx_xlock(&proctree_lock);
		PROC_LOCK(p);
		if ((error = p_candebug(td, p)) != 0)
			goto out;
		if (p->p_flag & P_TRACED) {
			error = EBUSY;
			goto out;
		}

		/* Can't trace yourself! */
		if (p->p_pid == td->td_proc->p_pid) {
			error = EINVAL;
			goto out;
		}

		/*
		 * Go ahead and set the trace flag.
		 * Save the old parent (it's reset in
		 *   _DETACH, and also in kern_exit.c:wait4()).
		 * Reparent the process so that the tracing
		 *   proc gets to see all the action.
		 * Stop the target.
		 */
		p->p_flag |= P_TRACED;
		faultin(p);
		p->p_xstat = 0;		/* XXX ? */
		if (p->p_pptr != td->td_proc) {
			p->p_oppid = p->p_pptr->p_pid;
			proc_reparent(p, td->td_proc);
		}
		psignal(p, SIGSTOP);
out:
		PROC_UNLOCK(p);
		sx_xunlock(&proctree_lock);
		return (error);
	}

	/*
	 * Authorization check: rely on normal debugging protection, except
	 * allow processes to disengage debugging on a process onto which
	 * they have previously attached, but no longer have permission to
	 * debug.
	 */
	PROC_LOCK(p);
	if (op != PROCFS_CTL_DETACH &&
	    ((error = p_candebug(td, p)))) {
		PROC_UNLOCK(p);
		return (error);
	}

	/*
	 * Target process must be stopped, owned by (td) and
	 * be set up for tracing (P_TRACED flag set).
	 * Allow DETACH to take place at any time for sanity.
	 * Allow WAIT any time, of course.
	 */
	switch (op) {
	case PROCFS_CTL_DETACH:
	case PROCFS_CTL_WAIT:
		break;

	default:
		if (!TRACE_WAIT_P(td->td_proc, p)) {
			PROC_UNLOCK(p);
			return (EBUSY);
		}
	}


#ifdef FIX_SSTEP
	/*
	 * do single-step fixup if needed
	 */
	FIX_SSTEP(FIRST_THREAD_IN_PROC(p));
#endif

	/*
	 * Don't deliver any signal by default.
	 * To continue with a signal, just send
	 * the signal name to the ctl file
	 */
	p->p_xstat = 0;

	switch (op) {
	/*
	 * Detach.  Clean up the target process, reparent it if possible,
	 * and set it running once more.
	 */
	case PROCFS_CTL_DETACH:
		/* if not being traced, then this is a painless no-op */
		if ((p->p_flag & P_TRACED) == 0) {
			PROC_UNLOCK(p);
			return (0);
		}

		/* not being traced any more */
		p->p_flag &= ~(P_TRACED | P_STOPPED_TRACE);

		/* remove pending SIGTRAP, else the process will die */
		sigqueue_delete_proc(p, SIGTRAP);
		FOREACH_THREAD_IN_PROC(p, temp)
			temp->td_dbgflags &= ~TDB_SUSPEND;
		PROC_UNLOCK(p);

		/* give process back to original parent */
		sx_xlock(&proctree_lock);
		if (p->p_oppid != p->p_pptr->p_pid) {
			struct proc *pp;

			pp = pfind(p->p_oppid);
			PROC_LOCK(p);
			if (pp) {
				PROC_UNLOCK(pp);
				proc_reparent(p, pp);
			}
		} else
			PROC_LOCK(p);
		p->p_oppid = 0;
		p->p_flag &= ~P_WAITED;	/* XXX ? */
		sx_xunlock(&proctree_lock);

		wakeup(td->td_proc);	/* XXX for CTL_WAIT below ? */

		break;

	/*
	 * Step.  Let the target process execute a single instruction.
	 * What does it mean to single step a threaded program?
	 */
	case PROCFS_CTL_STEP:
		error = proc_sstep(FIRST_THREAD_IN_PROC(p));
		if (error) {
			PROC_UNLOCK(p);
			return (error);
		}
		break;

	/*
	 * Run.  Let the target process continue running until a breakpoint
	 * or some other trap.
	 */
	case PROCFS_CTL_RUN:
		p->p_flag &= ~P_STOPPED_SIG;	/* this uses SIGSTOP */
		break;

	/*
	 * Wait for the target process to stop.
	 * If the target is not being traced then just wait
	 * for it to stop.
	 */
	case PROCFS_CTL_WAIT:
		if (p->p_flag & P_TRACED) {
			while (error == 0 &&
					(P_SHOULDSTOP(p)) &&
					(p->p_flag & P_TRACED) &&
					(p->p_pptr == td->td_proc))
				error = msleep(p, &p->p_mtx,
						PWAIT|PCATCH, "procfsx", 0);
			if (error == 0 && !TRACE_WAIT_P(td->td_proc, p))
				error = EBUSY;
		} else {
			while (error == 0 && P_SHOULDSTOP(p))
				error = msleep(p, &p->p_mtx,
						PWAIT|PCATCH, "procfs", 0);
		}
		PROC_UNLOCK(p);
		return (error);
	default:
		panic("procfs_control");
	}

	PROC_SLOCK(p);
	thread_unsuspend(p); /* If it can run, let it do so. */
	PROC_SUNLOCK(p);
	PROC_UNLOCK(p);
	return (0);
}
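
These operations are normally driven from userland by writing command strings to the target's procfs ctl file. A minimal sketch (assuming procfs is mounted on /proc and the string-to-opcode mapping documented in procfs(5)):

/* Hypothetical userland driver: attach to pid 123, wait, detach. */
#include <fcntl.h>
#include <unistd.h>

int
ctl_attach_detach(void)
{
	int fd;

	fd = open("/proc/123/ctl", O_WRONLY);
	if (fd < 0)
		return (-1);
	write(fd, "attach", 6);		/* PROCFS_CTL_ATTACH */
	write(fd, "wait", 4);		/* PROCFS_CTL_WAIT */
	write(fd, "detach", 6);		/* PROCFS_CTL_DETACH */
	close(fd);
	return (0);
}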
Example #3
static int
t4_cloner_create(struct if_clone *ifc, char *name, size_t len, caddr_t params)
{
	struct match_rr mrr;
	struct adapter *sc;
	struct ifnet *ifp;
	int rc, unit;
	const uint8_t lla[ETHER_ADDR_LEN] = {0, 0, 0, 0, 0, 0};

	mrr.name = name;
	mrr.lock = 1;
	mrr.sc = NULL;
	mrr.rc = ENOENT;
	t4_iterate(match_name, &mrr);

	if (mrr.rc != 0)
		return (mrr.rc);
	sc = mrr.sc;

	KASSERT(sc != NULL, ("%s: name (%s) matched but softc is NULL",
	    __func__, name));
	ASSERT_SYNCHRONIZED_OP(sc);

	sx_xlock(&t4_trace_lock);

	if (sc->ifp != NULL) {
		rc = EEXIST;
		goto done;
	}
	if (sc->traceq < 0) {
		rc = EAGAIN;
		goto done;
	}


	unit = -1;
	rc = ifc_alloc_unit(ifc, &unit);
	if (rc != 0)
		goto done;

	ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		ifc_free_unit(ifc, unit);
		rc = ENOMEM;
		goto done;
	}

	/* Note that if_xname is not <if_dname><if_dunit>. */
	strlcpy(ifp->if_xname, name, sizeof(ifp->if_xname));
	ifp->if_dname = t4_cloner_name;
	ifp->if_dunit = unit;
	ifp->if_init = tracer_init;
	ifp->if_flags = IFF_SIMPLEX | IFF_DRV_RUNNING;
	ifp->if_ioctl = tracer_ioctl;
	ifp->if_transmit = tracer_transmit;
	ifp->if_qflush = tracer_qflush;
	ifp->if_capabilities = IFCAP_JUMBO_MTU | IFCAP_VLAN_MTU;
	ifmedia_init(&sc->media, IFM_IMASK, tracer_media_change,
	    tracer_media_status);
	ifmedia_add(&sc->media, IFM_ETHER | IFM_FDX | IFM_NONE, 0, NULL);
	ifmedia_set(&sc->media, IFM_ETHER | IFM_FDX | IFM_NONE);
	ether_ifattach(ifp, lla);

	mtx_lock(&sc->ifp_lock);
	ifp->if_softc = sc;
	sc->ifp = ifp;
	mtx_unlock(&sc->ifp_lock);
done:
	sx_xunlock(&t4_trace_lock);
	end_synchronized_op(sc, 0);
	return (rc);
}
Example #4
int
linux_clone(struct thread *td, struct linux_clone_args *args)
{
	int error, ff = RFPROC | RFSTOPPED;
	struct proc *p2;
	struct thread *td2;
	int exit_signal;
	struct linux_emuldata *em;

#ifdef DEBUG
	if (ldebug(clone)) {
		printf(ARGS(clone, "flags %x, stack %p, parent tid: %p, "
		    "child tid: %p"), (unsigned)args->flags,
		    args->stack, args->parent_tidptr, args->child_tidptr);
	}
#endif

	exit_signal = args->flags & 0x000000ff;
	if (LINUX_SIG_VALID(exit_signal)) {
		if (exit_signal <= LINUX_SIGTBLSZ)
			exit_signal =
			    linux_to_bsd_signal[_SIG_IDX(exit_signal)];
	} else if (exit_signal != 0)
		return (EINVAL);

	if (args->flags & LINUX_CLONE_VM)
		ff |= RFMEM;
	if (args->flags & LINUX_CLONE_SIGHAND)
		ff |= RFSIGSHARE;
	/*
	 * XXX: In Linux, sharing of fs info (chroot/cwd/umask)
	 * and open files is independent.  In FreeBSD, it's in one
	 * structure but in reality it does not cause any problems
	 * because both of these flags are usually set together.
	 */
	if (!(args->flags & (LINUX_CLONE_FILES | LINUX_CLONE_FS)))
		ff |= RFFDG;

	/*
	 * Attempt to detect when linux_clone(2) is used for creating
	 * kernel threads. Unfortunately, despite the existence of the
	 * CLONE_THREAD flag, the version of the linuxthreads package used
	 * in most popular distros as of the beginning of 2005 doesn't make
	 * any use of it. Therefore, this detection relies on the
	 * empirical observation that linuxthreads sets a certain
	 * combination of flags, so that we can make more or less
	 * precise detection and notify the FreeBSD kernel that several
	 * processes are in fact part of the same threading group, so
	 * that special treatment is necessary for signal delivery
	 * between those processes and fd locking.
	 */
	if ((args->flags & 0xffffff00) == LINUX_THREADING_FLAGS)
		ff |= RFTHREAD;

	if (args->flags & LINUX_CLONE_PARENT_SETTID)
		if (args->parent_tidptr == NULL)
			return (EINVAL);

	error = fork1(td, ff, 0, &p2, NULL, 0);
	if (error)
		return (error);

	if (args->flags & (LINUX_CLONE_PARENT | LINUX_CLONE_THREAD)) {
	   	sx_xlock(&proctree_lock);
		PROC_LOCK(p2);
		proc_reparent(p2, td->td_proc->p_pptr);
		PROC_UNLOCK(p2);
		sx_xunlock(&proctree_lock);
	}

	/* create the emuldata */
	error = linux_proc_init(td, p2->p_pid, args->flags);
	/* reference it - no need to check this */
	em = em_find(p2, EMUL_DOLOCK);
	KASSERT(em != NULL, ("clone: emuldata not found."));
	/* and adjust it */

	if (args->flags & LINUX_CLONE_THREAD) {
#ifdef notyet
	   	PROC_LOCK(p2);
	   	p2->p_pgrp = td->td_proc->p_pgrp;
	   	PROC_UNLOCK(p2);
#endif
		exit_signal = 0;
	}

	if (args->flags & LINUX_CLONE_CHILD_SETTID)
		em->child_set_tid = args->child_tidptr;
	else
	   	em->child_set_tid = NULL;

	if (args->flags & LINUX_CLONE_CHILD_CLEARTID)
		em->child_clear_tid = args->child_tidptr;
	else
	   	em->child_clear_tid = NULL;

	EMUL_UNLOCK(&emul_lock);

	if (args->flags & LINUX_CLONE_PARENT_SETTID) {
		error = copyout(&p2->p_pid, args->parent_tidptr,
		    sizeof(p2->p_pid));
		if (error)
			printf(LMSG("copyout failed!"));
	}

	PROC_LOCK(p2);
	p2->p_sigparent = exit_signal;
	PROC_UNLOCK(p2);
	td2 = FIRST_THREAD_IN_PROC(p2);
	/*
	 * In the case of stack == NULL, we are supposed to COW the calling
	 * process's stack. This is what normal fork() does, so we just keep
	 * the tf_rsp arg intact.
	 */
	if (args->stack)
		linux_set_upcall_kse(td2, PTROUT(args->stack));

	if (args->flags & LINUX_CLONE_SETTLS)
		linux_set_cloned_tls(td2, args->tls);

#ifdef DEBUG
	if (ldebug(clone))
		printf(LMSG("clone: successful rfork to %d, "
		    "stack %p sig = %d"), (int)p2->p_pid, args->stack,
		    exit_signal);
#endif
	if (args->flags & LINUX_CLONE_VFORK) {
	   	PROC_LOCK(p2);
	   	p2->p_flag |= P_PPWAIT;
	   	PROC_UNLOCK(p2);
	}

	/*
	 * Make this runnable after we are finished with it.
	 */
	thread_lock(td2);
	TD_SET_CAN_RUN(td2);
	sched_add(td2, SRQ_BORING);
	thread_unlock(td2);

	td->td_retval[0] = p2->p_pid;
	td->td_retval[1] = 0;

	if (args->flags & LINUX_CLONE_VFORK) {
		/* wait for the child to exit, i.e. emulate vfork */
		PROC_LOCK(p2);
		while (p2->p_flag & P_PPWAIT)
			cv_wait(&p2->p_pwait, &p2->p_mtx);
		PROC_UNLOCK(p2);
	}

	return (0);
}
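
For reference, the Linux-side call that reaches this path looks roughly like the sketch below (assuming glibc's clone(2) wrapper; this is a linuxthreads-style flag combination, while the exact mask the kernel matches is LINUX_THREADING_FLAGS):

#define _GNU_SOURCE
#include <sched.h>
#include <signal.h>

static int
thread_main(void *arg)
{
	return (0);
}

int
spawn_linuxthreads_style(char *stack_top)
{
	/*
	 * CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND is the
	 * linuxthreads-style combination the RFTHREAD detection above
	 * keys on; SIGCHLD in the low byte becomes exit_signal.
	 */
	return (clone(thread_main, stack_top,
	    CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | SIGCHLD,
	    NULL));
}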
Example #5
static int
_rm_rlock_hard(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)
{
	struct pcpu *pc;

	critical_enter();
	pc = pcpu_find(curcpu);

	/* Check if we just need to do a proper critical_exit. */
	if (!CPU_ISSET(pc->pc_cpuid, &rm->rm_writecpus)) {
		critical_exit();
		return (1);
	}

	/* Remove our tracker from the per-cpu list. */
	rm_tracker_remove(pc, tracker);

	/* Check to see if the IPI granted us the lock after all. */
	if (tracker->rmp_flags) {
		/* Just add back tracker - we hold the lock. */
		rm_tracker_add(pc, tracker);
		critical_exit();
		return (1);
	}

	/*
	 * We allow readers to acquire a lock even if a writer is blocked if
	 * the lock is recursive and the reader already holds the lock.
	 */
	if ((rm->lock_object.lo_flags & LO_RECURSABLE) != 0) {
		/*
		 * Just grant the lock if this thread already has a tracker
		 * for this lock on the per-cpu queue.
		 */
		if (rm_trackers_present(pc, rm, curthread) != 0) {
			mtx_lock_spin(&rm_spinlock);
			LIST_INSERT_HEAD(&rm->rm_activeReaders, tracker,
			    rmp_qentry);
			tracker->rmp_flags = RMPF_ONQUEUE;
			mtx_unlock_spin(&rm_spinlock);
			rm_tracker_add(pc, tracker);
			critical_exit();
			return (1);
		}
	}

	sched_unpin();
	critical_exit();

	if (trylock) {
		if (rm->lock_object.lo_flags & LO_SLEEPABLE) {
			if (!sx_try_xlock(&rm->rm_lock_sx))
				return (0);
		} else {
			if (!mtx_trylock(&rm->rm_lock_mtx))
				return (0);
		}
	} else {
		if (rm->lock_object.lo_flags & LO_SLEEPABLE)
			sx_xlock(&rm->rm_lock_sx);
		else
			mtx_lock(&rm->rm_lock_mtx);
	}

	critical_enter();
	pc = pcpu_find(curcpu);
	CPU_CLR(pc->pc_cpuid, &rm->rm_writecpus);
	rm_tracker_add(pc, tracker);
	sched_pin();
	critical_exit();

	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		sx_xunlock(&rm->rm_lock_sx);
	else
		mtx_unlock(&rm->rm_lock_mtx);

	return (1);
}
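
_rm_rlock_hard() is only the slow path behind rm_rlock(); consumers use the public rmlock(9) API. A minimal usage sketch:

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/rmlock.h>

static struct rmlock example_rm;

static void
example_init(void)
{
	rm_init(&example_rm, "example rmlock");
}

static void
example_reader(void)
{
	struct rm_priotracker tracker;	/* per-acquisition, on the stack */

	rm_rlock(&example_rm, &tracker);
	/* ... read shared data; the common case avoids the slow path ... */
	rm_runlock(&example_rm, &tracker);
}

static void
example_writer(void)
{
	rm_wlock(&example_rm);
	/* ... modify shared data ... */
	rm_wunlock(&example_rm);
}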
Example #6
File: svc_dg.c  Project: AhmadTux/freebsd
static bool_t
svc_dg_recv(SVCXPRT *xprt, struct rpc_msg *msg,
    struct sockaddr **addrp, struct mbuf **mp)
{
	struct uio uio;
	struct sockaddr *raddr;
	struct mbuf *mreq;
	XDR xdrs;
	int error, rcvflag;

	/*
	 * Serialise access to the socket.
	 */
	sx_xlock(&xprt->xp_lock);

	/*
	 * The socket upcall calls xprt_active() which will eventually
	 * cause the server to call us here. We attempt to read a
	 * packet from the socket and process it. If the read fails,
	 * we have drained all pending requests so we call
	 * xprt_inactive().
	 */
	uio.uio_resid = 1000000000;
	uio.uio_td = curthread;
	mreq = NULL;
	rcvflag = MSG_DONTWAIT;
	error = soreceive(xprt->xp_socket, &raddr, &uio, &mreq, NULL, &rcvflag);

	if (error == EWOULDBLOCK) {
		/*
		 * We must re-test for readability after taking the
		 * lock to protect us in the case where a new packet
		 * arrives on the socket after our call to soreceive
		 * fails with EWOULDBLOCK. The pool lock protects us
		 * from racing the upcall after our soreadable() call
		 * returns false.
		 */
		mtx_lock(&xprt->xp_pool->sp_lock);
		if (!soreadable(xprt->xp_socket))
			xprt_inactive_locked(xprt);
		mtx_unlock(&xprt->xp_pool->sp_lock);
		sx_xunlock(&xprt->xp_lock);
		return (FALSE);
	}

	if (error) {
		SOCKBUF_LOCK(&xprt->xp_socket->so_rcv);
		soupcall_clear(xprt->xp_socket, SO_RCV);
		SOCKBUF_UNLOCK(&xprt->xp_socket->so_rcv);
		xprt_inactive(xprt);
		sx_xunlock(&xprt->xp_lock);
		return (FALSE);
	}

	sx_xunlock(&xprt->xp_lock);

	xdrmbuf_create(&xdrs, mreq, XDR_DECODE);
	if (! xdr_callmsg(&xdrs, msg)) {
		XDR_DESTROY(&xdrs);
		return (FALSE);
	}

	*addrp = raddr;
	*mp = xdrmbuf_getall(&xdrs);
	XDR_DESTROY(&xdrs);

	return (TRUE);
}
Example #7
int
fork1(struct thread *td, struct fork_req *fr)
{
	struct proc *p1, *newproc;
	struct thread *td2;
	struct vmspace *vm2;
	struct file *fp_procdesc;
	vm_ooffset_t mem_charged;
	int error, nprocs_new, ok;
	static int curfail;
	static struct timeval lastfail;
	int flags, pages;

	flags = fr->fr_flags;
	pages = fr->fr_pages;

	if ((flags & RFSTOPPED) != 0)
		MPASS(fr->fr_procp != NULL && fr->fr_pidp == NULL);
	else
		MPASS(fr->fr_procp == NULL);

	/* Check for the undefined or unimplemented flags. */
	if ((flags & ~(RFFLAGS | RFTSIGFLAGS(RFTSIGMASK))) != 0)
		return (EINVAL);

	/* Signal value requires RFTSIGZMB. */
	if ((flags & RFTSIGFLAGS(RFTSIGMASK)) != 0 && (flags & RFTSIGZMB) == 0)
		return (EINVAL);

	/* Can't copy and clear. */
	if ((flags & (RFFDG|RFCFDG)) == (RFFDG|RFCFDG))
		return (EINVAL);

	/* Check the validity of the signal number. */
	if ((flags & RFTSIGZMB) != 0 && (u_int)RFTSIGNUM(flags) > _SIG_MAXSIG)
		return (EINVAL);

	if ((flags & RFPROCDESC) != 0) {
		/* Can't get a process descriptor without creating a process. */
		if ((flags & RFPROC) == 0)
			return (EINVAL);

		/* Must provide a place to put a procdesc if creating one. */
		if (fr->fr_pd_fd == NULL)
			return (EINVAL);

		/* Check if we are using supported flags. */
		if ((fr->fr_pd_flags & ~PD_ALLOWED_AT_FORK) != 0)
			return (EINVAL);
	}

	p1 = td->td_proc;

	/*
	 * Here we don't create a new process, but we divorce
	 * certain parts of a process from itself.
	 */
	if ((flags & RFPROC) == 0) {
		if (fr->fr_procp != NULL)
			*fr->fr_procp = NULL;
		else if (fr->fr_pidp != NULL)
			*fr->fr_pidp = 0;
		return (fork_norfproc(td, flags));
	}

	fp_procdesc = NULL;
	newproc = NULL;
	vm2 = NULL;

	/*
	 * Increment the nprocs resource before allocations occur.
	 * Although process entries are dynamically created, we still
	 * keep a global limit on the maximum number we will
	 * create. There are hard-limits as to the number of processes
	 * that can run, established by the KVA and memory usage for
	 * the process data.
	 *
	 * Don't allow a nonprivileged user to use the last ten
	 * processes; don't let root exceed the limit.
	 */
	nprocs_new = atomic_fetchadd_int(&nprocs, 1) + 1;
	if ((nprocs_new >= maxproc - 10 && priv_check_cred(td->td_ucred,
	    PRIV_MAXPROC, 0) != 0) || nprocs_new >= maxproc) {
		error = EAGAIN;
		sx_xlock(&allproc_lock);
		if (ppsratecheck(&lastfail, &curfail, 1)) {
			printf("maxproc limit exceeded by uid %u (pid %d); "
			    "see tuning(7) and login.conf(5)\n",
			    td->td_ucred->cr_ruid, p1->p_pid);
		}
		sx_xunlock(&allproc_lock);
		goto fail2;
	}

	/*
	 * If required, create a process descriptor in the parent first; we
	 * will abandon it if something goes wrong. We don't finit() until
	 * later.
	 */
	if (flags & RFPROCDESC) {
		error = procdesc_falloc(td, &fp_procdesc, fr->fr_pd_fd,
		    fr->fr_pd_flags, fr->fr_pd_fcaps);
		if (error != 0)
			goto fail2;
	}

	mem_charged = 0;
	if (pages == 0)
		pages = kstack_pages;
	/* Allocate new proc. */
	newproc = uma_zalloc(proc_zone, M_WAITOK);
	td2 = FIRST_THREAD_IN_PROC(newproc);
	if (td2 == NULL) {
		td2 = thread_alloc(pages);
		if (td2 == NULL) {
			error = ENOMEM;
			goto fail2;
		}
		proc_linkup(newproc, td2);
	} else {
		if (td2->td_kstack == 0 || td2->td_kstack_pages != pages) {
			if (td2->td_kstack != 0)
				vm_thread_dispose(td2);
			if (!thread_alloc_stack(td2, pages)) {
				error = ENOMEM;
				goto fail2;
			}
		}
	}

	if ((flags & RFMEM) == 0) {
		vm2 = vmspace_fork(p1->p_vmspace, &mem_charged);
		if (vm2 == NULL) {
			error = ENOMEM;
			goto fail2;
		}
		if (!swap_reserve(mem_charged)) {
			/*
			 * The swap reservation failed. The accounting
			 * from the entries of the copied vm2 will be
			 * subtracted in vmspace_free(), so force the
			 * reservation there.
			 */
			swap_reserve_force(mem_charged);
			error = ENOMEM;
			goto fail2;
		}
	} else
		vm2 = NULL;

	/*
	 * XXX: This is ugly; when we copy resource usage, we need to bump
	 *      per-cred resource counters.
	 */
	proc_set_cred_init(newproc, crhold(td->td_ucred));

	/*
	 * Initialize resource accounting for the child process.
	 */
	error = racct_proc_fork(p1, newproc);
	if (error != 0) {
		error = EAGAIN;
		goto fail1;
	}

#ifdef MAC
	mac_proc_init(newproc);
#endif
	newproc->p_klist = knlist_alloc(&newproc->p_mtx);
	STAILQ_INIT(&newproc->p_ktr);

	/* We have to lock the process tree while we look for a pid. */
	sx_slock(&proctree_lock);
	sx_xlock(&allproc_lock);

	/*
	 * Increment the count of procs running with this uid. Don't allow
	 * a nonprivileged user to exceed their current limit.
	 *
	 * XXXRW: Can we avoid privilege here if it's not needed?
	 */
	error = priv_check_cred(td->td_ucred, PRIV_PROC_LIMIT, 0);
	if (error == 0)
		ok = chgproccnt(td->td_ucred->cr_ruidinfo, 1, 0);
	else {
		ok = chgproccnt(td->td_ucred->cr_ruidinfo, 1,
		    lim_cur(td, RLIMIT_NPROC));
	}
	if (ok) {
		do_fork(td, fr, newproc, td2, vm2, fp_procdesc);
		return (0);
	}

	error = EAGAIN;
	sx_sunlock(&proctree_lock);
	sx_xunlock(&allproc_lock);
#ifdef MAC
	mac_proc_destroy(newproc);
#endif
	racct_proc_exit(newproc);
fail1:
	crfree(newproc->p_ucred);
	newproc->p_ucred = NULL;
fail2:
	if (vm2 != NULL)
		vmspace_free(vm2);
	uma_zfree(proc_zone, newproc);
	if ((flags & RFPROCDESC) != 0 && fp_procdesc != NULL) {
		fdclose(td, fp_procdesc, *fr->fr_pd_fd);
		fdrop(fp_procdesc, td);
	}
	atomic_add_int(&nprocs, -1);
	pause("fork", hz / 2);
	return (error);
}
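
In-kernel callers drive fork1() through a zeroed struct fork_req. As a sketch, the plain fork(2) implementation does roughly the following:

/* Sketch of a fork1() caller, modeled on sys_fork(). */
static int
example_fork(struct thread *td)
{
	struct fork_req fr;
	pid_t pid;
	int error;

	bzero(&fr, sizeof(fr));
	fr.fr_flags = RFFDG | RFPROC;	/* copy the fd table, new process */
	fr.fr_pidp = &pid;
	error = fork1(td, &fr);
	if (error == 0) {
		td->td_retval[0] = pid;
		td->td_retval[1] = 0;
	}
	return (error);
}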
Example #8
static void
ufs_extattr_uepm_unlock(struct myfs_ufsmount *ump, struct thread *td)
{

	sx_xunlock(&ump->um_extattr.uepm_lock);
}
Example #9
/*------------------------------------------------------------------------*
 *	usbd_do_request_flags and usbd_do_request
 *
 * Description of arguments passed to these functions:
 *
 * "udev" - this is the "usb_device" structure pointer on which the
 * request should be performed. It is possible to call this function
 * in both Host Side mode and Device Side mode.
 *
 * "mtx" - if this argument is non-NULL the mutex pointed to by it
 * will get dropped and picked up during the execution of this
 * function, hence this function sometimes needs to sleep. If this
 * argument is NULL it has no effect.
 *
 * "req" - this argument must always be non-NULL and points to an
 * 8-byte structure holding the USB request to be done. The USB
 * request structure has a bit telling the direction of the USB
 * request, if it is a read or a write.
 *
 * "data" - if the "wLength" part of the structure pointed to by "req"
 * is non-zero this argument must point to a valid kernel buffer which
 * can hold at least "wLength" bytes. If "wLength" is zero "data" can
 * be NULL.
 *
 * "flags" - here is a list of valid flags:
 *
 *  o USB_SHORT_XFER_OK: allows the data transfer to be shorter than
 *  specified
 *
 *  o USB_DELAY_STATUS_STAGE: allows the status stage to be performed
 *  at a later point in time. This is tunable by the "hw.usb.ss_delay"
 *  sysctl. This flag is mostly useful for debugging.
 *
 *  o USB_USER_DATA_PTR: treat the "data" pointer like a userland
 *  pointer.
 *
 * "actlen" - if non-NULL the actual transfer length will be stored in
 * the 16-bit unsigned integer pointed to by "actlen". This
 * information is mostly useful when the "USB_SHORT_XFER_OK" flag is
 * used.
 *
 * "timeout" - gives the timeout for the control transfer in
 * milliseconds. A "timeout" value less than 50 milliseconds is
 * treated like a 50 millisecond timeout. A "timeout" value greater
 * than 30 seconds is treated like a 30 second timeout. This USB stack
 * does not allow control requests without a timeout.
 *
 * NOTE: This function is thread safe. All calls to
 * "usbd_do_request_flags" will be serialised by the use of an
 * internal "sx_lock".
 *
 * Returns:
 *    0: Success
 * Else: Failure
 *------------------------------------------------------------------------*/
usb_error_t
usbd_do_request_flags(struct usb_device *udev, struct mtx *mtx,
    struct usb_device_request *req, void *data, uint16_t flags,
    uint16_t *actlen, usb_timeout_t timeout)
{
	usb_handle_req_t *hr_func;
	struct usb_xfer *xfer;
	const void *desc;
	int err = 0;
	usb_ticks_t start_ticks;
	usb_ticks_t delta_ticks;
	usb_ticks_t max_ticks;
	uint16_t length;
	uint16_t temp;

	if (timeout < 50) {
		/* timeout is too small */
		timeout = 50;
	}
	if (timeout > 30000) {
		/* timeout is too big */
		timeout = 30000;
	}
	length = UGETW(req->wLength);

	DPRINTFN(5, "udev=%p bmRequestType=0x%02x bRequest=0x%02x "
	    "wValue=0x%02x%02x wIndex=0x%02x%02x wLength=0x%02x%02x\n",
	    udev, req->bmRequestType, req->bRequest,
	    req->wValue[1], req->wValue[0],
	    req->wIndex[1], req->wIndex[0],
	    req->wLength[1], req->wLength[0]);

	/* Check if the device is still alive */
	if (udev->state < USB_STATE_POWERED) {
		DPRINTF("usb device has gone\n");
		return (USB_ERR_NOT_CONFIGURED);
	}

	/*
	 * Set "actlen" to a known value in case the caller does not
	 * check the return value:
	 */
	if (actlen)
		*actlen = 0;

#if (USB_HAVE_USER_IO == 0)
	if (flags & USB_USER_DATA_PTR)
		return (USB_ERR_INVAL);
#endif
	if (mtx) {
		mtx_unlock(mtx);
		if (mtx != &Giant) {
			mtx_assert(mtx, MA_NOTOWNED);
		}
	}
	/*
	 * Grab the default sx-lock so that serialisation
	 * is achieved when multiple threads are involved:
	 */

	sx_xlock(udev->default_sx);

	hr_func = usbd_get_hr_func(udev);

	if (hr_func != NULL) {
		DPRINTF("Handle Request function is set\n");

		desc = NULL;
		temp = 0;

		if (!(req->bmRequestType & UT_READ)) {
			if (length != 0) {
				DPRINTFN(1, "The handle request function "
				    "does not support writing data!\n");
				err = USB_ERR_INVAL;
				goto done;
			}
		}

		/* The root HUB code needs the BUS lock locked */

		USB_BUS_LOCK(udev->bus);
		err = (hr_func) (udev, req, &desc, &temp);
		USB_BUS_UNLOCK(udev->bus);

		if (err)
			goto done;

		if (length > temp) {
			if (!(flags & USB_SHORT_XFER_OK)) {
				err = USB_ERR_SHORT_XFER;
				goto done;
			}
			length = temp;
		}
		if (actlen)
			*actlen = length;

		if (length > 0) {
#if USB_HAVE_USER_IO
			if (flags & USB_USER_DATA_PTR) {
				if (copyout(desc, data, length)) {
					err = USB_ERR_INVAL;
					goto done;
				}
			} else
#endif
				bcopy(desc, data, length);
		}
		goto done;		/* success */
	}

	/*
	 * Setup a new USB transfer or use the existing one, if any:
	 */
	usbd_default_transfer_setup(udev);

	xfer = udev->default_xfer[0];
	if (xfer == NULL) {
		/* most likely out of memory */
		err = USB_ERR_NOMEM;
		goto done;
	}
	USB_XFER_LOCK(xfer);

	if (flags & USB_DELAY_STATUS_STAGE)
		xfer->flags.manual_status = 1;
	else
		xfer->flags.manual_status = 0;

	if (flags & USB_SHORT_XFER_OK)
		xfer->flags.short_xfer_ok = 1;
	else
		xfer->flags.short_xfer_ok = 0;

	xfer->timeout = timeout;

	start_ticks = ticks;

	max_ticks = USB_MS_TO_TICKS(timeout);

	usbd_copy_in(xfer->frbuffers, 0, req, sizeof(*req));

	usbd_xfer_set_frame_len(xfer, 0, sizeof(*req));
	xfer->nframes = 2;

	while (1) {
		temp = length;
		if (temp > xfer->max_data_length) {
			temp = usbd_xfer_max_len(xfer);
		}
		usbd_xfer_set_frame_len(xfer, 1, temp);

		if (temp > 0) {
			if (!(req->bmRequestType & UT_READ)) {
#if USB_HAVE_USER_IO
				if (flags & USB_USER_DATA_PTR) {
					USB_XFER_UNLOCK(xfer);
					err = usbd_copy_in_user(xfer->frbuffers + 1,
					    0, data, temp);
					USB_XFER_LOCK(xfer);
					if (err) {
						err = USB_ERR_INVAL;
						break;
					}
				} else
#endif
					usbd_copy_in(xfer->frbuffers + 1,
					    0, data, temp);
			}
			xfer->nframes = 2;
		} else {
			if (xfer->frlengths[0] == 0) {
				if (xfer->flags.manual_status) {
#if USB_DEBUG
					int temp;

					temp = usb_ss_delay;
					if (temp > 5000) {
						temp = 5000;
					}
					if (temp > 0) {
						usb_pause_mtx(
						    xfer->xroot->xfer_mtx,
						    USB_MS_TO_TICKS(temp));
					}
#endif
					xfer->flags.manual_status = 0;
				} else {
					break;
				}
			}
			xfer->nframes = 1;
		}

		usbd_transfer_start(xfer);

		while (usbd_transfer_pending(xfer)) {
			cv_wait(udev->default_cv,
			    xfer->xroot->xfer_mtx);
		}

		err = xfer->error;

		if (err) {
			break;
		}
		/* subtract length of SETUP packet, if any */

		if (xfer->aframes > 0) {
			xfer->actlen -= xfer->frlengths[0];
		} else {
			xfer->actlen = 0;
		}

		/* check for short packet */

		if (temp > xfer->actlen) {
			temp = xfer->actlen;
			length = temp;
		}
		if (temp > 0) {
			if (req->bmRequestType & UT_READ) {
#if USB_HAVE_USER_IO
				if (flags & USB_USER_DATA_PTR) {
					USB_XFER_UNLOCK(xfer);
					err = usbd_copy_out_user(xfer->frbuffers + 1,
					    0, data, temp);
					USB_XFER_LOCK(xfer);
					if (err) {
						err = USB_ERR_INVAL;
						break;
					}
				} else
#endif
					usbd_copy_out(xfer->frbuffers + 1,
					    0, data, temp);
			}
		}
		/*
		 * Clear "frlengths[0]" so that we don't send the setup
		 * packet again:
		 */
		usbd_xfer_set_frame_len(xfer, 0, 0);

		/* update length and data pointer */
		length -= temp;
		data = USB_ADD_BYTES(data, temp);

		if (actlen) {
			(*actlen) += temp;
		}
		/* check for timeout */

		delta_ticks = ticks - start_ticks;
		if (delta_ticks > max_ticks) {
			if (!err) {
				err = USB_ERR_TIMEOUT;
			}
		}
		if (err) {
			break;
		}
	}

	if (err) {
		/*
		 * Make sure that the control endpoint is no longer
		 * blocked in case of a non-transfer related error:
		 */
		usbd_transfer_stop(xfer);
	}
	USB_XFER_UNLOCK(xfer);

done:
	sx_xunlock(udev->default_sx);

	if (mtx) {
		mtx_lock(mtx);
	}
	return ((usb_error_t)err);
}
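
Given the argument contract described in the comment block above, a typical synchronous control transfer looks like the sketch below (the request code 0x01 is hypothetical; UT_READ_VENDOR_DEVICE and USETW are the standard usb(9) helpers):

/* Sketch: read up to 64 bytes via a hypothetical vendor request. */
static usb_error_t
example_vendor_read(struct usb_device *udev, void *buf, uint16_t *actlen)
{
	struct usb_device_request req;

	req.bmRequestType = UT_READ_VENDOR_DEVICE;
	req.bRequest = 0x01;		/* hypothetical request code */
	USETW(req.wValue, 0);
	USETW(req.wIndex, 0);
	USETW(req.wLength, 64);

	/* No caller mutex, short transfers allowed, 1000 ms timeout. */
	return (usbd_do_request_flags(udev, NULL, &req, buf,
	    USB_SHORT_XFER_OK, actlen, 1000));
}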
Example #10
/*
 * Exit: deallocate address space and other resources, change proc state to
 * zombie, and unlink proc from allproc and parent's lists.  Save exit status
 * and rusage for wait().  Check for child processes and orphan them.
 */
void
exit1(struct thread *td, int rv)
{
	struct proc *p, *nq, *q;
	struct vnode *ttyvp = NULL;

	mtx_assert(&Giant, MA_NOTOWNED);

	p = td->td_proc;
	/*
	 * XXX in case we're rebooting we just let init die in order to
	 * work around an unsolved stack overflow seen very late during
	 * shutdown on sparc64 when the gmirror worker process exits.
	 */
	if (p == initproc && rebooting == 0) {
		printf("init died (signal %d, exit %d)\n",
		    WTERMSIG(rv), WEXITSTATUS(rv));
		panic("Going nowhere without my init!");
	}

	/*
	 * MUST abort all other threads before proceeding past here.
	 */
	PROC_LOCK(p);
	while (p->p_flag & P_HADTHREADS) {
		/*
		 * First check if some other thread got here before us.
		 * If so, act appropriately: exit or suspend.
		 */
		thread_suspend_check(0);

		/*
		 * Kill off the other threads. This requires
		 * some co-operation from other parts of the kernel
		 * so it may not be instantaneous.  With this state set
		 * any thread entering the kernel from userspace will
		 * thread_exit() in trap().  Any thread attempting to
		 * sleep will return immediately with EINTR or EWOULDBLOCK
		 * which will hopefully force them to back out to userland
		 * freeing resources as they go.  Any thread attempting
		 * to return to userland will thread_exit() from userret().
		 * thread_exit() will unsuspend us when the last of the
		 * other threads exits.
		 * If another thread has already begun single-threading
		 * when we resume, the thread_single() call will fail; in
		 * that case, we just re-check all suspension requests, and
		 * the thread should either be suspended there or exit.
		 */
		if (!thread_single(SINGLE_EXIT))
			break;

		/*
		 * All other activity in this process is now stopped.
		 * Threading support has been turned off.
		 */
	}
	KASSERT(p->p_numthreads == 1,
	    ("exit1: proc %p exiting with %d threads", p, p->p_numthreads));
	racct_sub(p, RACCT_NTHR, 1);
	/*
	 * Wakeup anyone in procfs' PIOCWAIT.  They should have a hold
	 * on our vmspace, so we should block below until they have
	 * released their reference to us.  Note that if they have
	 * requested S_EXIT stops we will block here until they ack
	 * via PIOCCONT.
	 */
	_STOPEVENT(p, S_EXIT, rv);

	/*
	 * Ignore any pending request to stop due to a stop signal.
	 * Once P_WEXIT is set, future requests will be ignored as
	 * well.
	 */
	p->p_flag &= ~P_STOPPED_SIG;
	KASSERT(!P_SHOULDSTOP(p), ("exiting process is stopped"));

	/*
	 * Note that we are exiting and do another wakeup of anyone in
	 * PIOCWAIT in case they aren't listening for S_EXIT stops or
	 * decided to wait again after we told them we are exiting.
	 */
	p->p_flag |= P_WEXIT;
	wakeup(&p->p_stype);

	/*
	 * Wait for any processes that have a hold on our vmspace to
	 * release their reference.
	 */
	while (p->p_lock > 0)
		msleep(&p->p_lock, &p->p_mtx, PWAIT, "exithold", 0);

	p->p_xstat = rv;	/* Let event handler change exit status */
	PROC_UNLOCK(p);
	/* Drain the limit callout while we don't have the proc locked */
	callout_drain(&p->p_limco);

#ifdef AUDIT
	/*
	 * The Sun BSM exit token contains two components: an exit status as
	 * passed to exit(), and a return value to indicate what sort of exit
	 * it was.  The exit status is WEXITSTATUS(rv), but it's not clear
	 * what the return value is.
	 */
	AUDIT_ARG_EXIT(WEXITSTATUS(rv), 0);
	AUDIT_SYSCALL_EXIT(0, td);
#endif

	/* Are we a task leader? */
	if (p == p->p_leader) {
		mtx_lock(&ppeers_lock);
		q = p->p_peers;
		while (q != NULL) {
			PROC_LOCK(q);
			kern_psignal(q, SIGKILL);
			PROC_UNLOCK(q);
			q = q->p_peers;
		}
		while (p->p_peers != NULL)
			msleep(p, &ppeers_lock, PWAIT, "exit1", 0);
		mtx_unlock(&ppeers_lock);
	}

	/*
	 * Check if any loadable modules need anything done at process exit.
	 * E.g. SYSV IPC stuff
	 * XXX what if one of these generates an error?
	 */
	EVENTHANDLER_INVOKE(process_exit, p);

	/*
	 * If parent is waiting for us to exit or exec,
	 * P_PPWAIT is set; we will wakeup the parent below.
	 */
	PROC_LOCK(p);
	rv = p->p_xstat;	/* Event handler could change exit status */
	stopprofclock(p);
	p->p_flag &= ~(P_TRACED | P_PPWAIT | P_PPTRACE);

	/*
	 * Stop the real interval timer.  If the handler is currently
	 * executing, prevent it from rearming itself and let it finish.
	 */
	if (timevalisset(&p->p_realtimer.it_value) &&
	    callout_stop(&p->p_itcallout) == 0) {
		timevalclear(&p->p_realtimer.it_interval);
		msleep(&p->p_itcallout, &p->p_mtx, PWAIT, "ritwait", 0);
		KASSERT(!timevalisset(&p->p_realtimer.it_value),
		    ("realtime timer is still armed"));
	}
	PROC_UNLOCK(p);

	/*
	 * Reset any sigio structures pointing to us as a result of
	 * F_SETOWN with our pid.
	 */
	funsetownlst(&p->p_sigiolst);

	/*
	 * If this process has an nlminfo data area (for lockd), release it
	 */
	if (nlminfo_release_p != NULL && p->p_nlminfo != NULL)
		(*nlminfo_release_p)(p);

	/*
	 * Close open files and release open-file table.
	 * This may block!
	 */
	fdescfree(td);

	/*
	 * If this thread tickled GEOM, we need to wait for the giggling to
	 * stop before we return to userland
	 */
	if (td->td_pflags & TDP_GEOM)
		g_waitidle();

	/*
	 * Remove ourself from our leader's peer list and wake our leader.
	 */
	mtx_lock(&ppeers_lock);
	if (p->p_leader->p_peers) {
		q = p->p_leader;
		while (q->p_peers != p)
			q = q->p_peers;
		q->p_peers = p->p_peers;
		wakeup(p->p_leader);
	}
	mtx_unlock(&ppeers_lock);

	vmspace_exit(td);

	sx_xlock(&proctree_lock);
	if (SESS_LEADER(p)) {
		struct session *sp = p->p_session;
		struct tty *tp;

		/*
		 * s_ttyp is not zero'd; we use this to indicate that
		 * the session once had a controlling terminal. (for
		 * logging and informational purposes)
		 */
		SESS_LOCK(sp);
		ttyvp = sp->s_ttyvp;
		tp = sp->s_ttyp;
		sp->s_ttyvp = NULL;
		sp->s_ttydp = NULL;
		sp->s_leader = NULL;
		SESS_UNLOCK(sp);

		/*
		 * Signal foreground pgrp and revoke access to
		 * controlling terminal if it has not been revoked
		 * already.
		 *
		 * Because the TTY may have been revoked in the mean
		 * time and could already have a new session associated
		 * with it, make sure we don't send a SIGHUP to a
		 * foreground process group that does not belong to this
		 * session.
		 */

		if (tp != NULL) {
			tty_lock(tp);
			if (tp->t_session == sp)
				tty_signal_pgrp(tp, SIGHUP);
			tty_unlock(tp);
		}

		if (ttyvp != NULL) {
			sx_xunlock(&proctree_lock);
			if (vn_lock(ttyvp, LK_EXCLUSIVE) == 0) {
				VOP_REVOKE(ttyvp, REVOKEALL);
				VOP_UNLOCK(ttyvp, 0);
			}
			sx_xlock(&proctree_lock);
		}
	}
	fixjobc(p, p->p_pgrp, 0);
	sx_xunlock(&proctree_lock);
	(void)acct_process(td);

	/* Release the TTY now we've unlocked everything. */
	if (ttyvp != NULL)
		vrele(ttyvp);
#ifdef KTRACE
	ktrprocexit(td);
#endif
	/*
	 * Release reference to text vnode
	 */
	if (p->p_textvp != NULL) {
		vrele(p->p_textvp);
		p->p_textvp = NULL;
	}

	/*
	 * Release our limits structure.
	 */
	lim_free(p->p_limit);
	p->p_limit = NULL;

	tidhash_remove(td);

	/*
	 * Remove proc from allproc queue and pidhash chain.
	 * Place onto zombproc.  Unlink from parent's child list.
	 */
	sx_xlock(&allproc_lock);
	LIST_REMOVE(p, p_list);
	LIST_INSERT_HEAD(&zombproc, p, p_list);
	LIST_REMOVE(p, p_hash);
	sx_xunlock(&allproc_lock);

	/*
	 * Call machine-dependent code to release any
	 * machine-dependent resources other than the address space.
	 * The address space is released by "vmspace_exitfree(p)" in
	 * vm_waitproc().
	 */
	cpu_exit(td);

	WITNESS_WARN(WARN_PANIC, NULL, "process (pid %d) exiting", p->p_pid);

	/*
	 * Reparent all of our children to init.
	 */
	sx_xlock(&proctree_lock);
	q = LIST_FIRST(&p->p_children);
	if (q != NULL)		/* only need this if any child is S_ZOMB */
		wakeup(initproc);
	for (; q != NULL; q = nq) {
		nq = LIST_NEXT(q, p_sibling);
		PROC_LOCK(q);
		proc_reparent(q, initproc);
		q->p_sigparent = SIGCHLD;
		/*
		 * Traced processes are killed
		 * since their existence means someone is screwing up.
		 */
		if (q->p_flag & P_TRACED) {
			struct thread *temp;

			/*
			 * Since q was found on our children list, the
			 * proc_reparent() call moved q to the orphan
			 * list due to present P_TRACED flag. Clear
			 * orphan link for q now while q is locked.
			 */
			clear_orphan(q);
			q->p_flag &= ~(P_TRACED | P_STOPPED_TRACE);
			FOREACH_THREAD_IN_PROC(q, temp)
				temp->td_dbgflags &= ~TDB_SUSPEND;
			kern_psignal(q, SIGKILL);
		}
		PROC_UNLOCK(q);
	}

	/*
	 * Also get rid of our orphans.
	 */
	while ((q = LIST_FIRST(&p->p_orphans)) != NULL) {
		PROC_LOCK(q);
		clear_orphan(q);
		PROC_UNLOCK(q);
	}

	/* Save exit status. */
	PROC_LOCK(p);
	p->p_xthread = td;

	/* Tell the prison that we are gone. */
	prison_proc_free(p->p_ucred->cr_prison);

#ifdef KDTRACE_HOOKS
	/*
	 * Tell the DTrace fasttrap provider about the exit if it
	 * has declared an interest.
	 */
	if (dtrace_fasttrap_exit)
		dtrace_fasttrap_exit(p);
#endif

	/*
	 * Notify interested parties of our demise.
	 */
	KNOTE_LOCKED(&p->p_klist, NOTE_EXIT);

#ifdef KDTRACE_HOOKS
	int reason = CLD_EXITED;
	if (WCOREDUMP(rv))
		reason = CLD_DUMPED;
	else if (WIFSIGNALED(rv))
		reason = CLD_KILLED;
	SDT_PROBE(proc, kernel, , exit, reason, 0, 0, 0, 0);
#endif

	/*
	 * Just delete all entries in the p_klist. At this point we won't
	 * report any more events, and there are nasty race conditions that
	 * can beat us if we don't.
	 */
	knlist_clear(&p->p_klist, 1);

	/*
	 * If this is a process with a descriptor, we may not need to deliver
	 * a signal to the parent.  proctree_lock is held over
	 * procdesc_exit() to serialize concurrent calls to close() and
	 * exit().
	 */
	if (p->p_procdesc == NULL || procdesc_exit(p)) {
		/*
		 * Notify parent that we're gone.  If parent has the
		 * PS_NOCLDWAIT flag set, or if the handler is set to SIG_IGN,
		 * notify process 1 instead (and hope it will handle this
		 * situation).
		 */
		PROC_LOCK(p->p_pptr);
		mtx_lock(&p->p_pptr->p_sigacts->ps_mtx);
		if (p->p_pptr->p_sigacts->ps_flag &
		    (PS_NOCLDWAIT | PS_CLDSIGIGN)) {
			struct proc *pp;

			mtx_unlock(&p->p_pptr->p_sigacts->ps_mtx);
			pp = p->p_pptr;
			PROC_UNLOCK(pp);
			proc_reparent(p, initproc);
			p->p_sigparent = SIGCHLD;
			PROC_LOCK(p->p_pptr);

			/*
			 * Notify parent, so in case he was wait(2)ing or
			 * executing waitpid(2) with our pid, he will
			 * continue.
			 */
			wakeup(pp);
		} else
			mtx_unlock(&p->p_pptr->p_sigacts->ps_mtx);

		if (p->p_pptr == initproc)
			kern_psignal(p->p_pptr, SIGCHLD);
		else if (p->p_sigparent != 0) {
			if (p->p_sigparent == SIGCHLD)
				childproc_exited(p);
			else	/* LINUX thread */
				kern_psignal(p->p_pptr, p->p_sigparent);
		}
	} else
		PROC_LOCK(p->p_pptr);
	sx_xunlock(&proctree_lock);

	/*
	 * The state PRS_ZOMBIE prevents other processes from sending
	 * signals to the process; to avoid a memory leak, we free the
	 * memory for the signal queue at the time the state is set.
	 */
	sigqueue_flush(&p->p_sigqueue);
	sigqueue_flush(&td->td_sigqueue);

	/*
	 * We have to wait until after acquiring all locks before
	 * changing p_state.  We need to avoid all possible context
	 * switches (including ones from blocking on a mutex) while
	 * marked as a zombie.  We also have to set the zombie state
	 * before we release the parent process' proc lock to avoid
	 * a lost wakeup.  So, we first call wakeup, then we grab the
	 * sched lock, update the state, and release the parent process'
	 * proc lock.
	 */
	wakeup(p->p_pptr);
	cv_broadcast(&p->p_pwait);
	sched_exit(p->p_pptr, td);
	PROC_SLOCK(p);
	p->p_state = PRS_ZOMBIE;
	PROC_UNLOCK(p->p_pptr);

	/*
	 * Hopefully no one will try to deliver a signal to the process this
	 * late in the game.
	 */
	knlist_destroy(&p->p_klist);

	/*
	 * Save our children's rusage information in our exit rusage.
	 */
	ruadd(&p->p_ru, &p->p_rux, &p->p_stats->p_cru, &p->p_crux);

	/*
	 * Make sure the scheduler takes this thread out of its tables etc.
	 * This will also release this thread's reference to the ucred.
	 * Other thread parts to release include pcb bits and such.
	 */
	thread_exit();
}
Example #11
File: mem.c  Project: jamesbjackson/src
/* ARGSUSED */
int
memrw(struct cdev *dev, struct uio *uio, int flags)
{
	int o;
	u_int c = 0, v;
	struct iovec *iov;
	int error = 0;
	vm_offset_t addr, eaddr;

	while (uio->uio_resid > 0 && error == 0) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			if (uio->uio_iovcnt < 0)
				panic("memrw");
			continue;
		}
		if (dev2unit(dev) == CDEV_MINOR_MEM) {
			int i;
			int address_valid = 0;

			v = uio->uio_offset;
			v &= ~PAGE_MASK;
			for (i = 0; dump_avail[i] || dump_avail[i + 1];
			i += 2) {
				if (v >= dump_avail[i] &&
				    v < dump_avail[i + 1]) {
					address_valid = 1;
					break;
				}
			}
			if (!address_valid)
				return (EINVAL);
			sx_xlock(&tmppt_lock);
			pmap_kenter((vm_offset_t)_tmppt, v);
			o = (int)uio->uio_offset & PAGE_MASK;
			c = (u_int)(PAGE_SIZE - ((int)iov->iov_base & PAGE_MASK));
			c = min(c, (u_int)(PAGE_SIZE - o));
			c = min(c, (u_int)iov->iov_len);
			error = uiomove((caddr_t)&_tmppt[o], (int)c, uio);
			pmap_qremove((vm_offset_t)_tmppt, 1);
			sx_xunlock(&tmppt_lock);
			continue;
		}
		else if (dev2unit(dev) == CDEV_MINOR_KMEM) {
			c = iov->iov_len;

			/*
			 * Make sure that all of the pages are currently
			 * resident so that we don't create any zero-fill
			 * pages.
			 */
			addr = trunc_page(uio->uio_offset);
			eaddr = round_page(uio->uio_offset + c);

			for (; addr < eaddr; addr += PAGE_SIZE)
				if (pmap_extract(kernel_pmap, addr) == 0)
					return (EFAULT);
			if (!kernacc((caddr_t)(int)uio->uio_offset, c,
			    uio->uio_rw == UIO_READ ?
			    VM_PROT_READ : VM_PROT_WRITE))
					return (EFAULT);
			error = uiomove((caddr_t)(int)uio->uio_offset, (int)c, uio);
			continue;
		}
		/* else panic! */
	}
	return (error);
}
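
From userland this handler is reached through ordinary read(2)/write(2) on /dev/mem (physical addresses) or /dev/kmem (kernel virtual addresses). A minimal sketch:

/* Hypothetical userland reader: fetch one page of physical memory. */
#include <fcntl.h>
#include <unistd.h>

int
read_phys_page(off_t phys, char *buf, size_t pagesz)
{
	int fd;
	ssize_t n;

	fd = open("/dev/mem", O_RDONLY);
	if (fd < 0)
		return (-1);
	n = pread(fd, buf, pagesz, phys);	/* serviced by memrw() */
	close(fd);
	return (n == (ssize_t)pagesz ? 0 : -1);
}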
Example #12
int
kern_ptrace(struct thread *td, int req, pid_t pid, void *addr, int data)
{
	struct iovec iov;
	struct uio uio;
	struct proc *curp, *p, *pp;
	struct thread *td2;
	struct ptrace_io_desc *piod;
	int error, write, tmp;
	int proctree_locked = 0;

	curp = td->td_proc;

	/* Lock proctree before locking the process. */
	switch (req) {
	case PT_TRACE_ME:
	case PT_ATTACH:
	case PT_STEP:
	case PT_CONTINUE:
	case PT_DETACH:
		sx_xlock(&proctree_lock);
		proctree_locked = 1;
		break;
	default:
		break;
	}
		
	write = 0;
	if (req == PT_TRACE_ME) {
		p = td->td_proc;
		PROC_LOCK(p);
	} else {
		if ((p = pfind(pid)) == NULL) {
			if (proctree_locked)
				sx_xunlock(&proctree_lock);
			return (ESRCH);
		}
	}
	if ((error = p_cansee(td, p)) != 0)
		goto fail;

	if ((error = p_candebug(td, p)) != 0)
		goto fail;

	/*
	 * System processes can't be debugged.
	 */
	if ((p->p_flag & P_SYSTEM) != 0) {
		error = EINVAL;
		goto fail;
	}
	
	/*
	 * Permissions check
	 */
	switch (req) {
	case PT_TRACE_ME:
		/* Always legal. */
		break;

	case PT_ATTACH:
		/* Self */
		if (p->p_pid == td->td_proc->p_pid) {
			error = EINVAL;
			goto fail;
		}

		/* Already traced */
		if (p->p_flag & P_TRACED) {
			error = EBUSY;
			goto fail;
		}

		/* Can't trace an ancestor if you're being traced. */
		if (curp->p_flag & P_TRACED) {
			for (pp = curp->p_pptr; pp != NULL; pp = pp->p_pptr) {
				if (pp == p) {
					error = EINVAL;
					goto fail;
				}
			}
		}


		/* OK */
		break;

	case PT_READ_I:
	case PT_READ_D:
	case PT_WRITE_I:
	case PT_WRITE_D:
	case PT_IO:
	case PT_CONTINUE:
	case PT_KILL:
	case PT_STEP:
	case PT_DETACH:
	case PT_GETREGS:
	case PT_SETREGS:
	case PT_GETFPREGS:
	case PT_SETFPREGS:
	case PT_GETDBREGS:
	case PT_SETDBREGS:
		/* not being traced... */
		if ((p->p_flag & P_TRACED) == 0) {
			error = EPERM;
			goto fail;
		}

		/* not being traced by YOU */
		if (p->p_pptr != td->td_proc) {
			error = EBUSY;
			goto fail;
		}

		/* not currently stopped */
		if (!P_SHOULDSTOP(p) || (p->p_flag & P_WAITED) == 0) {
			error = EBUSY;
			goto fail;
		}

		/* OK */
		break;

	default:
		error = EINVAL;
		goto fail;
	}

	td2 = FIRST_THREAD_IN_PROC(p);
#ifdef FIX_SSTEP
	/*
	 * Single step fixup ala procfs
	 */
	FIX_SSTEP(td2);			/* XXXKSE */
#endif

	/*
	 * Actually do the requests
	 */

	td->td_retval[0] = 0;

	switch (req) {
	case PT_TRACE_ME:
		/* set my trace flag and "owner" so it can read/write me */
		p->p_flag |= P_TRACED;
		p->p_oppid = p->p_pptr->p_pid;
		PROC_UNLOCK(p);
		sx_xunlock(&proctree_lock);
		return (0);

	case PT_ATTACH:
		/* security check done above */
		p->p_flag |= P_TRACED;
		p->p_oppid = p->p_pptr->p_pid;
		if (p->p_pptr != td->td_proc)
			proc_reparent(p, td->td_proc);
		data = SIGSTOP;
		goto sendsig;	/* in PT_CONTINUE below */

	case PT_STEP:
	case PT_CONTINUE:
	case PT_DETACH:
		/* XXX data is used even in the PT_STEP case. */
		if (req != PT_STEP && (unsigned)data > _SIG_MAXSIG) {
			error = EINVAL;
			goto fail;
		}

		_PHOLD(p);

		if (req == PT_STEP) {
			error = ptrace_single_step(td2);
			if (error) {
				_PRELE(p);
				goto fail;
			}
		}

		if (addr != (void *)1) {
			error = ptrace_set_pc(td2, (u_long)(uintfptr_t)addr);
			if (error) {
				_PRELE(p);
				goto fail;
			}
		}
		_PRELE(p);

		if (req == PT_DETACH) {
			/* reset process parent */
			if (p->p_oppid != p->p_pptr->p_pid) {
				struct proc *pp;

				PROC_UNLOCK(p);
				pp = pfind(p->p_oppid);
				if (pp == NULL)
					pp = initproc;
				else
					PROC_UNLOCK(pp);
				PROC_LOCK(p);
				proc_reparent(p, pp);
			}
			p->p_flag &= ~(P_TRACED | P_WAITED);
			p->p_oppid = 0;

			/* should we send SIGCHLD? */
		}

	sendsig:
		if (proctree_locked)
			sx_xunlock(&proctree_lock);
		/* deliver or queue signal */
		if (P_SHOULDSTOP(p)) {
			p->p_xstat = data;
			mtx_lock_spin(&sched_lock);
			p->p_flag &= ~(P_STOPPED_TRACE|P_STOPPED_SIG);
			thread_unsuspend(p);
			setrunnable(td2);	/* XXXKSE */
			/* Need foreach kse in proc, ... make_kse_queued(). */
			mtx_unlock_spin(&sched_lock);
		} else if (data)
			psignal(p, data);
		PROC_UNLOCK(p);
		
		return (0);

	case PT_WRITE_I:
	case PT_WRITE_D:
		write = 1;
		/* FALLTHROUGH */
	case PT_READ_I:
	case PT_READ_D:
		PROC_UNLOCK(p);
		tmp = 0;
		/* write = 0 set above */
		iov.iov_base = write ? (caddr_t)&data : (caddr_t)&tmp;
		iov.iov_len = sizeof(int);
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = (off_t)(uintptr_t)addr;
		uio.uio_resid = sizeof(int);
		uio.uio_segflg = UIO_SYSSPACE;	/* i.e.: the uap */
		uio.uio_rw = write ? UIO_WRITE : UIO_READ;
		uio.uio_td = td;
		error = proc_rwmem(p, &uio);
		if (uio.uio_resid != 0) {
			/*
			 * XXX proc_rwmem() doesn't currently return ENOSPC,
			 * so I think write() can bogusly return 0.
			 * XXX what happens for short writes?  We don't want
			 * to write partial data.
			 * XXX proc_rwmem() returns EPERM for other invalid
			 * addresses.  Convert this to EINVAL.  Does this
			 * clobber returns of EPERM for other reasons?
			 */
			if (error == 0 || error == ENOSPC || error == EPERM)
				error = EINVAL;	/* EOF */
		}
		if (!write)
			td->td_retval[0] = tmp;
		return (error);

	case PT_IO:
		PROC_UNLOCK(p);
		piod = addr;
		iov.iov_base = piod->piod_addr;
		iov.iov_len = piod->piod_len;
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = (off_t)(uintptr_t)piod->piod_offs;
		uio.uio_resid = piod->piod_len;
		uio.uio_segflg = UIO_USERSPACE;
		uio.uio_td = td;
		switch (piod->piod_op) {
		case PIOD_READ_D:
		case PIOD_READ_I:
			uio.uio_rw = UIO_READ;
			break;
		case PIOD_WRITE_D:
		case PIOD_WRITE_I:
			uio.uio_rw = UIO_WRITE;
			break;
		default:
			return (EINVAL);
		}
		error = proc_rwmem(p, &uio);
		piod->piod_len -= uio.uio_resid;
		return (error);

	case PT_KILL:
		data = SIGKILL;
		goto sendsig;	/* in PT_CONTINUE above */

	case PT_SETREGS:
		_PHOLD(p);
		error = proc_write_regs(td2, addr);
		_PRELE(p);
		PROC_UNLOCK(p);
		return (error);

	case PT_GETREGS:
		_PHOLD(p);
		error = proc_read_regs(td2, addr);
		_PRELE(p);
		PROC_UNLOCK(p);
		return (error);

	case PT_SETFPREGS:
		_PHOLD(p);
		error = proc_write_fpregs(td2, addr);
		_PRELE(p);
		PROC_UNLOCK(p);
		return (error);

	case PT_GETFPREGS:
		_PHOLD(p);
		error = proc_read_fpregs(td2, addr);
		_PRELE(p);
		PROC_UNLOCK(p);
		return (error);

	case PT_SETDBREGS:
		_PHOLD(p);
		error = proc_write_dbregs(td2, addr);
		_PRELE(p);
		PROC_UNLOCK(p);
		return (error);

	case PT_GETDBREGS:
		_PHOLD(p);
		error = proc_read_dbregs(td2, addr);
		_PRELE(p);
		PROC_UNLOCK(p);
		return (error);

	default:
		KASSERT(0, ("unreachable code\n"));
		break;
	}

	KASSERT(0, ("unreachable code\n"));
	return (0);

fail:
	PROC_UNLOCK(p);
	if (proctree_locked)
		sx_xunlock(&proctree_lock);
	return (error);
}
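
The same request codes are issued from userland through ptrace(2). A minimal attach-and-detach sketch:

/* Hypothetical userland tracer for the requests handled above. */
#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/wait.h>

int
trace_briefly(pid_t pid)
{
	int status;

	if (ptrace(PT_ATTACH, pid, NULL, 0) < 0)	/* queues SIGSTOP */
		return (-1);
	if (waitpid(pid, &status, 0) < 0)		/* wait for the stop */
		return (-1);
	/* addr (caddr_t)1 means "resume where it stopped", signal 0 */
	return (ptrace(PT_DETACH, pid, (caddr_t)1, 0));
}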
Example #13
static int
linux_clone_proc(struct thread *td, struct linux_clone_args *args)
{
	struct fork_req fr;
	int error, ff = RFPROC | RFSTOPPED;
	struct proc *p2;
	struct thread *td2;
	int exit_signal;
	struct linux_emuldata *em;

#ifdef DEBUG
	if (ldebug(clone)) {
		printf(ARGS(clone, "flags %x, stack %p, parent tid: %p, "
		    "child tid: %p"), (unsigned)args->flags,
		    args->stack, args->parent_tidptr, args->child_tidptr);
	}
#endif

	exit_signal = args->flags & 0x000000ff;
	if (LINUX_SIG_VALID(exit_signal)) {
		exit_signal = linux_to_bsd_signal(exit_signal);
	} else if (exit_signal != 0)
		return (EINVAL);

	if (args->flags & LINUX_CLONE_VM)
		ff |= RFMEM;
	if (args->flags & LINUX_CLONE_SIGHAND)
		ff |= RFSIGSHARE;
	/*
	 * XXX: In Linux, sharing of fs info (chroot/cwd/umask)
	 * and open files is independent.  In FreeBSD, it's in one
	 * structure but in reality it does not cause any problems
	 * because both of these flags are usually set together.
	 */
	if (!(args->flags & (LINUX_CLONE_FILES | LINUX_CLONE_FS)))
		ff |= RFFDG;

	if (args->flags & LINUX_CLONE_PARENT_SETTID)
		if (args->parent_tidptr == NULL)
			return (EINVAL);

	if (args->flags & LINUX_CLONE_VFORK)
		ff |= RFPPWAIT;

	bzero(&fr, sizeof(fr));
	fr.fr_flags = ff;
	fr.fr_procp = &p2;
	error = fork1(td, &fr);
	if (error)
		return (error);

	td2 = FIRST_THREAD_IN_PROC(p2);

	/* create the emuldata */
	linux_proc_init(td, td2, args->flags);

	em = em_find(td2);
	KASSERT(em != NULL, ("clone_proc: emuldata not found.\n"));

	if (args->flags & LINUX_CLONE_CHILD_SETTID)
		em->child_set_tid = args->child_tidptr;
	else
	   	em->child_set_tid = NULL;

	if (args->flags & LINUX_CLONE_CHILD_CLEARTID)
		em->child_clear_tid = args->child_tidptr;
	else
	   	em->child_clear_tid = NULL;

	if (args->flags & LINUX_CLONE_PARENT_SETTID) {
		error = copyout(&p2->p_pid, args->parent_tidptr,
		    sizeof(p2->p_pid));
		if (error)
			printf(LMSG("copyout failed!"));
	}

	PROC_LOCK(p2);
	p2->p_sigparent = exit_signal;
	PROC_UNLOCK(p2);
	/*
	 * In the case of stack == NULL, we are supposed to COW the calling
	 * process's stack. This is what normal fork() does, so we just keep
	 * the tf_rsp arg intact.
	 */
	linux_set_upcall_kse(td2, PTROUT(args->stack));

	if (args->flags & LINUX_CLONE_SETTLS)
		linux_set_cloned_tls(td2, args->tls);

	/*
	 * If CLONE_PARENT is set, then the parent of the new process will be 
	 * the same as that of the calling process.
	 */
	if (args->flags & LINUX_CLONE_PARENT) {
		sx_xlock(&proctree_lock);
		PROC_LOCK(p2);
		proc_reparent(p2, td->td_proc->p_pptr);
		PROC_UNLOCK(p2);
		sx_xunlock(&proctree_lock);
	}

#ifdef DEBUG
	if (ldebug(clone))
		printf(LMSG("clone: successful rfork to %d, "
		    "stack %p sig = %d"), (int)p2->p_pid, args->stack,
		    exit_signal);
#endif

	/*
	 * Make this runnable after we are finished with it.
	 */
	thread_lock(td2);
	TD_SET_CAN_RUN(td2);
	sched_add(td2, SRQ_BORING);
	thread_unlock(td2);

	td->td_retval[0] = p2->p_pid;

	return (0);
}
Example #14
File: uipc_sem.c  Project: coyizumi/cs111
/* Other helper routines. */
static int
ksem_create(struct thread *td, const char *name, semid_t *semidp, mode_t mode,
    unsigned int value, int flags, int compat32)
{
	struct filedesc *fdp;
	struct ksem *ks;
	struct file *fp;
	char *path;
	Fnv32_t fnv;
	int error, fd;

	if (value > SEM_VALUE_MAX)
		return (EINVAL);

	fdp = td->td_proc->p_fd;
	mode = (mode & ~fdp->fd_cmask) & ACCESSPERMS;
	error = falloc(td, &fp, &fd, O_CLOEXEC);
	if (error) {
		if (name == NULL)
			error = ENOSPC;
		return (error);
	}

	/*
	 * Go ahead and copyout the file descriptor now.  This is a bit
	 * premature, but it is a lot easier to handle errors now than
	 * later, when we may have created a new semaphore, etc.
	 */
	error = ksem_create_copyout_semid(td, semidp, fd, compat32);
	if (error) {
		fdclose(fdp, fp, fd, td);
		fdrop(fp, td);
		return (error);
	}

	if (name == NULL) {
		/* Create an anonymous semaphore. */
		ks = ksem_alloc(td->td_ucred, mode, value);
		if (ks == NULL)
			error = ENOSPC;
		else
			ks->ks_flags |= KS_ANONYMOUS;
	} else {
		path = malloc(MAXPATHLEN, M_KSEM, M_WAITOK);
		error = copyinstr(name, path, MAXPATHLEN, NULL);

		/* Require paths to start with a '/' character. */
		if (error == 0 && path[0] != '/')
			error = EINVAL;
		if (error) {
			fdclose(fdp, fp, fd, td);
			fdrop(fp, td);
			free(path, M_KSEM);
			return (error);
		}

		fnv = fnv_32_str(path, FNV1_32_INIT);
		sx_xlock(&ksem_dict_lock);
		ks = ksem_lookup(path, fnv);
		if (ks == NULL) {
			/* Object does not exist, create it if requested. */
			if (flags & O_CREAT) {
				ks = ksem_alloc(td->td_ucred, mode, value);
				if (ks == NULL)
					error = ENFILE;
				else {
					ksem_insert(path, fnv, ks);
					path = NULL;
				}
			} else
				error = ENOENT;
		} else {
			/*
			 * Object already exists, obtain a new
			 * reference if requested and permitted.
			 */
			if ((flags & (O_CREAT | O_EXCL)) ==
			    (O_CREAT | O_EXCL))
				error = EEXIST;
			else {
#ifdef MAC
				error = mac_posixsem_check_open(td->td_ucred,
				    ks);
				if (error == 0)
#endif
				error = ksem_access(ks, td->td_ucred);
			}
			if (error == 0)
				ksem_hold(ks);
#ifdef INVARIANTS
			else
				ks = NULL;
#endif
		}
		sx_xunlock(&ksem_dict_lock);
		if (path)
			free(path, M_KSEM);
	}

	if (error) {
		KASSERT(ks == NULL, ("ksem_create error with a ksem"));
		fdclose(fdp, fp, fd, td);
		fdrop(fp, td);
		return (error);
	}
	KASSERT(ks != NULL, ("ksem_create w/o a ksem"));

	finit(fp, FREAD | FWRITE, DTYPE_SEM, ks, &ksem_ops);

	fdrop(fp, td);

	return (0);
}
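
ksem_create() backs the ksem_open(2)/sem_open(3) path; as the code above enforces, named semaphores must start with '/'. A minimal userland sketch (the name is illustrative):

#include <fcntl.h>
#include <semaphore.h>
#include <stdio.h>

int
main(void)
{
	/* Create the named semaphore if needed, initial value 1. */
	sem_t *sem = sem_open("/example_sem", O_CREAT, 0600, 1);

	if (sem == SEM_FAILED) {
		perror("sem_open");
		return (1);
	}
	sem_wait(sem);			/* enter critical section */
	/* ... */
	sem_post(sem);			/* leave critical section */
	sem_close(sem);
	sem_unlink("/example_sem");	/* drop the name */
	return (0);
}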
Example #15
0
static int
at91_spi_transfer(device_t dev, device_t child, struct spi_command *cmd)
{
	struct at91_spi_softc *sc;
	bus_addr_t addr;
	int err, i, j, mode[4];

	KASSERT(cmd->tx_cmd_sz == cmd->rx_cmd_sz,
	    ("%s: TX/RX command sizes should be equal", __func__));
	KASSERT(cmd->tx_data_sz == cmd->rx_data_sz,
	    ("%s: TX/RX data sizes should be equal", __func__));

	sc = device_get_softc(dev);
	i = 0;

	sx_xlock(&sc->xfer_mtx);

	/*
	 * Disable transfers while we set things up.
	 */
	WR4(sc, PDC_PTCR, PDC_PTCR_TXTDIS | PDC_PTCR_RXTDIS);

#ifdef SPI_CHIPSEL_SUPPORT
	if (cmd->cs < 0 || cmd->cs > 3) {
		device_printf(dev,
		    "Invalid chip select %d requested by %s\n", cmd->cs,
		    device_get_nameunit(child));
		err = EINVAL;
		goto out;
	}
#ifdef SPI_CHIP_SELECT_HIGH_SUPPORT
	if (at91_is_rm92() && cmd->cs == 0 &&
	    (cmd->flags & SPI_CHIP_SELECT_HIGH) != 0) {
		device_printf(dev,
		    "Invalid chip select high requested by %s\n",
		    device_get_nameunit(child));
		err = EINVAL;
		goto out;
	}
#endif
	WR4(sc, SPI_MR, (RD4(sc, SPI_MR) & ~0x000f0000) | CS_TO_MR(cmd->cs));
#endif

	/*
	 * Set up the TX side of the transfer.
	 */
	if ((err = bus_dmamap_load(sc->dmatag, sc->map[i], cmd->tx_cmd,
	    cmd->tx_cmd_sz, at91_getaddr, &addr, 0)) != 0)
		goto out;
	WR4(sc, PDC_TPR, addr);
	WR4(sc, PDC_TCR, cmd->tx_cmd_sz);
	bus_dmamap_sync(sc->dmatag, sc->map[i], BUS_DMASYNC_PREWRITE);
	mode[i++] = BUS_DMASYNC_POSTWRITE;
	if (cmd->tx_data_sz > 0) {
		if ((err = bus_dmamap_load(sc->dmatag, sc->map[i],
		    cmd->tx_data, cmd->tx_data_sz, at91_getaddr, &addr, 0)) !=
		    0)
			goto out;
		WR4(sc, PDC_TNPR, addr);
		WR4(sc, PDC_TNCR, cmd->tx_data_sz);
		bus_dmamap_sync(sc->dmatag, sc->map[i], BUS_DMASYNC_PREWRITE);
		mode[i++] = BUS_DMASYNC_POSTWRITE;
	}

	/*
	 * Set up the RX side of the transfer.
	 */
	if ((err = bus_dmamap_load(sc->dmatag, sc->map[i], cmd->rx_cmd,
	    cmd->rx_cmd_sz, at91_getaddr, &addr, 0)) != 0)
		goto out;
	WR4(sc, PDC_RPR, addr);
	WR4(sc, PDC_RCR, cmd->rx_cmd_sz);
	bus_dmamap_sync(sc->dmatag, sc->map[i], BUS_DMASYNC_PREREAD);
	mode[i++] = BUS_DMASYNC_POSTREAD;
	if (cmd->rx_data_sz > 0) {
		if ((err = bus_dmamap_load(sc->dmatag, sc->map[i],
		    cmd->rx_data, cmd->rx_data_sz, at91_getaddr, &addr, 0)) !=
		    0)
			goto out;
		WR4(sc, PDC_RNPR, addr);
		WR4(sc, PDC_RNCR, cmd->rx_data_sz);
		bus_dmamap_sync(sc->dmatag, sc->map[i], BUS_DMASYNC_PREREAD);
		mode[i++] = BUS_DMASYNC_POSTREAD;
	}

	/*
	 * Start the transfer, wait for it to complete.
	 */
	sc->xfer_done = 0;
	WR4(sc, SPI_IER, SPI_SR_RXBUFF);
	WR4(sc, PDC_PTCR, PDC_PTCR_TXTEN | PDC_PTCR_RXTEN);
	do
		err = tsleep(&sc->xfer_done, PCATCH | PZERO, "at91_spi", hz);
	while (sc->xfer_done == 0 && err != EINTR);

	/*
	 * Stop the transfer and clean things up.
	 */
	WR4(sc, PDC_PTCR, PDC_PTCR_TXTDIS | PDC_PTCR_RXTDIS);
	if (err == 0)
		for (j = 0; j < i; j++)
			bus_dmamap_sync(sc->dmatag, sc->map[j], mode[j]);
out:
	for (j = 0; j < i; j++)
		bus_dmamap_unload(sc->dmatag, sc->map[j]);

	sx_xunlock(&sc->xfer_mtx);

	return (err);
}
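
A hedged sketch of how a child device might drive this transfer routine through the spibus interface; the JEDEC READ ID command byte (0x9f), the buffer sizes, and read_jedec_id() itself are illustrative and not part of the driver above:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <dev/spibus/spi.h>
#include "spibus_if.h"

static int
read_jedec_id(device_t dev, uint8_t *id)	/* id: 3 bytes out */
{
	struct spi_command cmd;
	uint8_t txBuf[4], rxBuf[4];
	int err;

	memset(&cmd, 0, sizeof(cmd));
	memset(txBuf, 0, sizeof(txBuf));
	txBuf[0] = 0x9f;		/* READ IDENTIFICATION */
	cmd.tx_cmd = txBuf;
	cmd.rx_cmd = rxBuf;
	/* TX/RX sizes must match; see the KASSERTs above. */
	cmd.tx_cmd_sz = sizeof(txBuf);
	cmd.rx_cmd_sz = sizeof(rxBuf);
	err = SPIBUS_TRANSFER(device_get_parent(dev), dev, &cmd);
	if (err == 0)
		memcpy(id, &rxBuf[1], 3);
	return (err);
}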
Example #16
0
/*
 * Create a new transport for a socket obtained via soaccept().
 */
SVCXPRT *
svc_vc_create_conn(SVCPOOL *pool, struct socket *so, struct sockaddr *raddr)
{
	SVCXPRT *xprt = NULL;
	struct cf_conn *cd = NULL;
	struct sockaddr* sa = NULL;
	struct sockopt opt;
	int one = 1;
	int error;

	bzero(&opt, sizeof(struct sockopt));
	opt.sopt_dir = SOPT_SET;
	opt.sopt_level = SOL_SOCKET;
	opt.sopt_name = SO_KEEPALIVE;
	opt.sopt_val = &one;
	opt.sopt_valsize = sizeof(one);
	error = sosetopt(so, &opt);
	if (error) {
		return (NULL);
	}

	if (so->so_proto->pr_protocol == IPPROTO_TCP) {
		bzero(&opt, sizeof(struct sockopt));
		opt.sopt_dir = SOPT_SET;
		opt.sopt_level = IPPROTO_TCP;
		opt.sopt_name = TCP_NODELAY;
		opt.sopt_val = &one;
		opt.sopt_valsize = sizeof(one);
		error = sosetopt(so, &opt);
		if (error) {
			return (NULL);
		}
	}

	cd = mem_alloc(sizeof(*cd));
	cd->strm_stat = XPRT_IDLE;

	xprt = svc_xprt_alloc();
	sx_init(&xprt->xp_lock, "xprt->xp_lock");
	xprt->xp_pool = pool;
	xprt->xp_socket = so;
	xprt->xp_p1 = cd;
	xprt->xp_p2 = NULL;
	xprt->xp_ops = &svc_vc_ops;

	/*
	 * See http://www.connectathon.org/talks96/nfstcp.pdf - client
	 * has a 5 minute timer, server has a 6 minute timer.
	 */
	xprt->xp_idletimeout = 6 * 60;

	memcpy(&xprt->xp_rtaddr, raddr, raddr->sa_len);

	CURVNET_SET(so->so_vnet);
	error = so->so_proto->pr_usrreqs->pru_sockaddr(so, &sa);
	CURVNET_RESTORE();
	if (error)
		goto cleanup_svc_vc_create;

	memcpy(&xprt->xp_ltaddr, sa, sa->sa_len);
	free(sa, M_SONAME);

	xprt_register(xprt);

	SOCKBUF_LOCK(&so->so_rcv);
	xprt->xp_upcallset = 1;
	soupcall_set(so, SO_RCV, svc_vc_soupcall, xprt);
	SOCKBUF_UNLOCK(&so->so_rcv);

	/*
	 * Throw the transport into the active list in case it already
	 * has some data buffered.
	 */
	sx_xlock(&xprt->xp_lock);
	xprt_active(xprt);
	sx_xunlock(&xprt->xp_lock);

	return (xprt);
cleanup_svc_vc_create:
	if (xprt) {
		sx_destroy(&xprt->xp_lock);
		svc_xprt_free(xprt);
	}
	if (cd)
		mem_free(cd, sizeof(*cd));
	return (NULL);
}
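
The two sosetopt() calls above are the in-kernel counterpart of an ordinary setsockopt(2) sequence; for comparison, a userland sketch (set_rpc_sockopts() is illustrative):

#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>

static int
set_rpc_sockopts(int s)
{
	int one = 1;

	if (setsockopt(s, SOL_SOCKET, SO_KEEPALIVE, &one,
	    sizeof(one)) == -1)
		return (-1);
	if (setsockopt(s, IPPROTO_TCP, TCP_NODELAY, &one,
	    sizeof(one)) == -1)
		return (-1);
	return (0);
}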
Example #17
0
static void
ufs_extattr_uepm_unlock(struct ufsmount *ump)
{

	sx_xunlock(&ump->um_extattr.uepm_lock);
}
Example #18
0
/*ARGSUSED*/
static bool_t
svc_vc_rendezvous_recv(SVCXPRT *xprt, struct rpc_msg *msg,
    struct sockaddr **addrp, struct mbuf **mp)
{
	struct socket *so = NULL;
	struct sockaddr *sa = NULL;
	int error;
	SVCXPRT *new_xprt;

	/*
	 * The socket upcall calls xprt_active() which will eventually
	 * cause the server to call us here. We attempt to accept a
	 * connection from the socket and turn it into a new
	 * transport. If the accept fails, we have drained all pending
	 * connections so we call xprt_inactive().
	 */
	sx_xlock(&xprt->xp_lock);

	error = svc_vc_accept(xprt->xp_socket, &so);

	if (error == EWOULDBLOCK) {
		/*
		 * We must re-test for new connections after taking
		 * the lock to protect us in the case where a new
		 * connection arrives after our call to accept fails
		 * with EWOULDBLOCK.
		 */
		ACCEPT_LOCK();
		if (TAILQ_EMPTY(&xprt->xp_socket->so_comp))
			xprt_inactive_self(xprt);
		ACCEPT_UNLOCK();
		sx_xunlock(&xprt->xp_lock);
		return (FALSE);
	}

	if (error) {
		SOCKBUF_LOCK(&xprt->xp_socket->so_rcv);
		if (xprt->xp_upcallset) {
			xprt->xp_upcallset = 0;
			soupcall_clear(xprt->xp_socket, SO_RCV);
		}
		SOCKBUF_UNLOCK(&xprt->xp_socket->so_rcv);
		xprt_inactive_self(xprt);
		sx_xunlock(&xprt->xp_lock);
		return (FALSE);
	}

	sx_xunlock(&xprt->xp_lock);

	sa = NULL;
	error = soaccept(so, &sa);

	if (error) {
		/*
		 * XXX not sure if I need to call sofree or soclose here.
		 */
		if (sa)
			free(sa, M_SONAME);
		return (FALSE);
	}

	/*
	 * svc_vc_create_conn will call xprt_register - we don't need
	 * to do anything with the new connection except dereference it.
	 */
	new_xprt = svc_vc_create_conn(xprt->xp_pool, so, sa);
	if (!new_xprt) {
		soclose(so);
	} else {
		SVC_RELEASE(new_xprt);
	}

	free(sa, M_SONAME);

	return (FALSE); /* there is never an rpc msg to be processed */
}
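
The EWOULDBLOCK branch above re-tests the accept queue after taking the lock, so a connection arriving between the failed accept and the lock acquisition is not lost. The same discipline with portable primitives, as an illustrative sketch rather than kernel code:

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
static bool work_pending;

static bool
claim_work(void)
{
	bool claimed = false;

	pthread_mutex_lock(&mtx);
	if (work_pending) {	/* re-test under the lock */
		work_pending = false;
		claimed = true;
	}
	pthread_mutex_unlock(&mtx);
	return (claimed);
}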
Example #19
0
static void
do_fork(struct thread *td, struct fork_req *fr, struct proc *p2, struct thread *td2,
    struct vmspace *vm2, struct file *fp_procdesc)
{
	struct proc *p1, *pptr;
	int trypid;
	struct filedesc *fd;
	struct filedesc_to_leader *fdtol;
	struct sigacts *newsigacts;

	sx_assert(&proctree_lock, SX_SLOCKED);
	sx_assert(&allproc_lock, SX_XLOCKED);

	p1 = td->td_proc;

	trypid = fork_findpid(fr->fr_flags);

	sx_sunlock(&proctree_lock);

	p2->p_state = PRS_NEW;		/* protect against others */
	p2->p_pid = trypid;
	AUDIT_ARG_PID(p2->p_pid);
	LIST_INSERT_HEAD(&allproc, p2, p_list);
	allproc_gen++;
	LIST_INSERT_HEAD(PIDHASH(p2->p_pid), p2, p_hash);
	tidhash_add(td2);
	PROC_LOCK(p2);
	PROC_LOCK(p1);

	sx_xunlock(&allproc_lock);

	bcopy(&p1->p_startcopy, &p2->p_startcopy,
	    __rangeof(struct proc, p_startcopy, p_endcopy));
	pargs_hold(p2->p_args);

	PROC_UNLOCK(p1);

	bzero(&p2->p_startzero,
	    __rangeof(struct proc, p_startzero, p_endzero));

	/* Tell the prison that we exist. */
	prison_proc_hold(p2->p_ucred->cr_prison);

	PROC_UNLOCK(p2);

	/*
	 * Malloc things while we don't hold any locks.
	 */
	if (fr->fr_flags & RFSIGSHARE)
		newsigacts = NULL;
	else
		newsigacts = sigacts_alloc();

	/*
	 * Copy filedesc.
	 */
	if (fr->fr_flags & RFCFDG) {
		fd = fdinit(p1->p_fd, false);
		fdtol = NULL;
	} else if (fr->fr_flags & RFFDG) {
		fd = fdcopy(p1->p_fd);
		fdtol = NULL;
	} else {
		fd = fdshare(p1->p_fd);
		if (p1->p_fdtol == NULL)
			p1->p_fdtol = filedesc_to_leader_alloc(NULL, NULL,
			    p1->p_leader);
		if ((fr->fr_flags & RFTHREAD) != 0) {
			/*
			 * Shared file descriptor table, and shared
			 * process leaders.
			 */
			fdtol = p1->p_fdtol;
			FILEDESC_XLOCK(p1->p_fd);
			fdtol->fdl_refcount++;
			FILEDESC_XUNLOCK(p1->p_fd);
		} else {
			/* 
			 * Shared file descriptor table, and different
			 * process leaders.
			 */
			fdtol = filedesc_to_leader_alloc(p1->p_fdtol,
			    p1->p_fd, p2);
		}
	}
	/*
	 * Make a proc table entry for the new process.
	 * Start by zeroing the section of proc that is zero-initialized,
	 * then copy the section that is copied directly from the parent.
	 */

	PROC_LOCK(p2);
	PROC_LOCK(p1);

	bzero(&td2->td_startzero,
	    __rangeof(struct thread, td_startzero, td_endzero));

	bcopy(&td->td_startcopy, &td2->td_startcopy,
	    __rangeof(struct thread, td_startcopy, td_endcopy));

	bcopy(&p2->p_comm, &td2->td_name, sizeof(td2->td_name));
	td2->td_sigstk = td->td_sigstk;
	td2->td_flags = TDF_INMEM;
	td2->td_lend_user_pri = PRI_MAX;

#ifdef VIMAGE
	td2->td_vnet = NULL;
	td2->td_vnet_lpush = NULL;
#endif

	/*
	 * Allow the scheduler to initialize the child.
	 */
	thread_lock(td);
	sched_fork(td, td2);
	thread_unlock(td);

	/*
	 * Duplicate sub-structures as needed.
	 * Increase reference counts on shared objects.
	 */
	p2->p_flag = P_INMEM;
	p2->p_flag2 = p1->p_flag2 & (P2_NOTRACE | P2_NOTRACE_EXEC | P2_TRAPCAP);
	p2->p_swtick = ticks;
	if (p1->p_flag & P_PROFIL)
		startprofclock(p2);

	/*
	 * Whilst the proc lock is held, copy the VM domain data out
	 * using the VM domain method.
	 */
	vm_domain_policy_init(&p2->p_vm_dom_policy);
	vm_domain_policy_localcopy(&p2->p_vm_dom_policy,
	    &p1->p_vm_dom_policy);

	if (fr->fr_flags & RFSIGSHARE) {
		p2->p_sigacts = sigacts_hold(p1->p_sigacts);
	} else {
		sigacts_copy(newsigacts, p1->p_sigacts);
		p2->p_sigacts = newsigacts;
	}

	if (fr->fr_flags & RFTSIGZMB)
	        p2->p_sigparent = RFTSIGNUM(fr->fr_flags);
	else if (fr->fr_flags & RFLINUXTHPN)
	        p2->p_sigparent = SIGUSR1;
	else
	        p2->p_sigparent = SIGCHLD;

	p2->p_textvp = p1->p_textvp;
	p2->p_fd = fd;
	p2->p_fdtol = fdtol;

	if (p1->p_flag2 & P2_INHERIT_PROTECTED) {
		p2->p_flag |= P_PROTECTED;
		p2->p_flag2 |= P2_INHERIT_PROTECTED;
	}

	/*
	 * p_limit is copy-on-write.  Bump its refcount.
	 */
	lim_fork(p1, p2);

	thread_cow_get_proc(td2, p2);

	pstats_fork(p1->p_stats, p2->p_stats);

	PROC_UNLOCK(p1);
	PROC_UNLOCK(p2);

	/* Bump references to the text vnode (for procfs). */
	if (p2->p_textvp)
		vrefact(p2->p_textvp);

	/*
	 * Set up linkage for kernel based threading.
	 */
	if ((fr->fr_flags & RFTHREAD) != 0) {
		mtx_lock(&ppeers_lock);
		p2->p_peers = p1->p_peers;
		p1->p_peers = p2;
		p2->p_leader = p1->p_leader;
		mtx_unlock(&ppeers_lock);
		PROC_LOCK(p1->p_leader);
		if ((p1->p_leader->p_flag & P_WEXIT) != 0) {
			PROC_UNLOCK(p1->p_leader);
			/*
			 * The task leader is exiting, so process p1 is
			 * going to be killed shortly.  Since p1 obviously
			 * isn't dead yet, we know that the leader is either
			 * sending SIGKILL's to all the processes in this
			 * task or is sleeping waiting for all the peers to
			 * exit.  We let p1 complete the fork, but we need
			 * to go ahead and kill the new process p2 since
			 * the task leader may not get a chance to send
			 * SIGKILL to it.  We leave it on the list so that
			 * the task leader will wait for this new process
			 * to commit suicide.
			 */
			PROC_LOCK(p2);
			kern_psignal(p2, SIGKILL);
			PROC_UNLOCK(p2);
		} else
			PROC_UNLOCK(p1->p_leader);
	} else {
		p2->p_peers = NULL;
		p2->p_leader = p2;
	}

	sx_xlock(&proctree_lock);
	PGRP_LOCK(p1->p_pgrp);
	PROC_LOCK(p2);
	PROC_LOCK(p1);

	/*
	 * Preserve some more flags in subprocess.  P_PROFIL has already
	 * been preserved.
	 */
	p2->p_flag |= p1->p_flag & P_SUGID;
	td2->td_pflags |= (td->td_pflags & TDP_ALTSTACK) | TDP_FORKING;
	SESS_LOCK(p1->p_session);
	if (p1->p_session->s_ttyvp != NULL && p1->p_flag & P_CONTROLT)
		p2->p_flag |= P_CONTROLT;
	SESS_UNLOCK(p1->p_session);
	if (fr->fr_flags & RFPPWAIT)
		p2->p_flag |= P_PPWAIT;

	p2->p_pgrp = p1->p_pgrp;
	LIST_INSERT_AFTER(p1, p2, p_pglist);
	PGRP_UNLOCK(p1->p_pgrp);
	LIST_INIT(&p2->p_children);
	LIST_INIT(&p2->p_orphans);

	callout_init_mtx(&p2->p_itcallout, &p2->p_mtx, 0);

	/*
	 * If PF_FORK is set, the child process inherits the
	 * procfs ioctl flags from its parent.
	 */
	if (p1->p_pfsflags & PF_FORK) {
		p2->p_stops = p1->p_stops;
		p2->p_pfsflags = p1->p_pfsflags;
	}

	/*
	 * This begins the section where we must prevent the parent
	 * from being swapped.
	 */
	_PHOLD(p1);
	PROC_UNLOCK(p1);

	/*
	 * Attach the new process to its parent.
	 *
	 * If RFNOWAIT is set, the newly created process becomes a child
	 * of init.  This effectively disassociates the child from the
	 * parent.
	 */
	if ((fr->fr_flags & RFNOWAIT) != 0) {
		pptr = p1->p_reaper;
		p2->p_reaper = pptr;
	} else {
		p2->p_reaper = (p1->p_treeflag & P_TREE_REAPER) != 0 ?
		    p1 : p1->p_reaper;
		pptr = p1;
	}
	p2->p_pptr = pptr;
	LIST_INSERT_HEAD(&pptr->p_children, p2, p_sibling);
	LIST_INIT(&p2->p_reaplist);
	LIST_INSERT_HEAD(&p2->p_reaper->p_reaplist, p2, p_reapsibling);
	if (p2->p_reaper == p1)
		p2->p_reapsubtree = p2->p_pid;
	sx_xunlock(&proctree_lock);

	/* Inform accounting that we have forked. */
	p2->p_acflag = AFORK;
	PROC_UNLOCK(p2);

#ifdef KTRACE
	ktrprocfork(p1, p2);
#endif

	/*
	 * Finish creating the child process.  It will return via a different
	 * execution path later.  (ie: directly into user mode)
	 */
	vm_forkproc(td, p2, td2, vm2, fr->fr_flags);

	if (fr->fr_flags == (RFFDG | RFPROC)) {
		VM_CNT_INC(v_forks);
		VM_CNT_ADD(v_forkpages, p2->p_vmspace->vm_dsize +
		    p2->p_vmspace->vm_ssize);
	} else if (fr->fr_flags == (RFFDG | RFPROC | RFPPWAIT | RFMEM)) {
		VM_CNT_INC(v_vforks);
		VM_CNT_ADD(v_vforkpages, p2->p_vmspace->vm_dsize +
		    p2->p_vmspace->vm_ssize);
	} else if (p1 == &proc0) {
		VM_CNT_INC(v_kthreads);
		VM_CNT_ADD(v_kthreadpages, p2->p_vmspace->vm_dsize +
		    p2->p_vmspace->vm_ssize);
	} else {
		VM_CNT_INC(v_rforks);
		VM_CNT_ADD(v_rforkpages, p2->p_vmspace->vm_dsize +
		    p2->p_vmspace->vm_ssize);
	}

	/*
	 * Associate the process descriptor with the process before anything
	 * can happen that might cause that process to need the descriptor.
	 * However, don't do this until after fork(2) can no longer fail.
	 */
	if (fr->fr_flags & RFPROCDESC)
		procdesc_new(p2, fr->fr_pd_flags);

	/*
	 * Both processes are set up, now check if any loadable modules want
	 * to adjust anything.
	 */
	EVENTHANDLER_INVOKE(process_fork, p1, p2, fr->fr_flags);

	/*
	 * Set the child start time and mark the process as being complete.
	 */
	PROC_LOCK(p2);
	PROC_LOCK(p1);
	microuptime(&p2->p_stats->p_start);
	PROC_SLOCK(p2);
	p2->p_state = PRS_NORMAL;
	PROC_SUNLOCK(p2);

#ifdef KDTRACE_HOOKS
	/*
	 * Tell the DTrace fasttrap provider about the new process so that any
	 * tracepoints inherited from the parent can be removed. We have to do
	 * this only after p_state is PRS_NORMAL since the fasttrap module will
	 * use pfind() later on.
	 */
	if ((fr->fr_flags & RFMEM) == 0 && dtrace_fasttrap_fork)
		dtrace_fasttrap_fork(p1, p2);
#endif
	/*
	 * Hold the process so that it cannot exit after we make it runnable,
	 * but before we wait for the debugger.
	 */
	_PHOLD(p2);
	if (p1->p_ptevents & PTRACE_FORK) {
		/*
		 * Arrange for debugger to receive the fork event.
		 *
		 * We can report PL_FLAG_FORKED regardless of
		 * P_FOLLOWFORK settings, but it does not make sense
		 * for a runaway child.
		 */
		td->td_dbgflags |= TDB_FORK;
		td->td_dbg_forked = p2->p_pid;
		td2->td_dbgflags |= TDB_STOPATFORK;
	}
	if (fr->fr_flags & RFPPWAIT) {
		td->td_pflags |= TDP_RFPPWAIT;
		td->td_rfppwait_p = p2;
		td->td_dbgflags |= TDB_VFORK;
	}
	PROC_UNLOCK(p2);

	/*
	 * Now can be swapped.
	 */
	_PRELE(p1);
	PROC_UNLOCK(p1);

	/*
	 * Tell any interested parties about the new process.
	 */
	knote_fork(p1->p_klist, p2->p_pid);
	SDT_PROBE3(proc, , , create, p2, p1, fr->fr_flags);

	if (fr->fr_flags & RFPROCDESC) {
		procdesc_finit(p2->p_procdesc, fp_procdesc);
		fdrop(fp_procdesc, td);
	}

	if ((fr->fr_flags & RFSTOPPED) == 0) {
		/*
		 * If RFSTOPPED not requested, make child runnable and
		 * add to run queue.
		 */
		thread_lock(td2);
		TD_SET_CAN_RUN(td2);
		sched_add(td2, SRQ_BORING);
		thread_unlock(td2);
		if (fr->fr_pidp != NULL)
			*fr->fr_pidp = p2->p_pid;
	} else {
		*fr->fr_procp = p2;
	}

	PROC_LOCK(p2);
	/*
	 * Wait until debugger is attached to child.
	 */
	while (td2->td_proc == p2 && (td2->td_dbgflags & TDB_STOPATFORK) != 0)
		cv_wait(&p2->p_dbgwait, &p2->p_mtx);
	_PRELE(p2);
	racct_proc_fork_done(p2);
	PROC_UNLOCK(p2);
}
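
do_fork() serves fork(2), vfork(2) and rfork(2); the RF* bits in fr_flags select descriptor-table and address-space sharing. A minimal FreeBSD userland sketch exercising the plain-fork path (RFPROC | RFFDG):

#include <sys/types.h>
#include <sys/wait.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	/* New process, copied descriptor table: equivalent to fork(). */
	pid_t pid = rfork(RFPROC | RFFDG);

	if (pid == -1) {
		perror("rfork");
		return (1);
	}
	if (pid == 0) {
		printf("child %d\n", (int)getpid());
		_exit(0);
	}
	waitpid(pid, NULL, 0);
	return (0);
}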
Example #20
0
static bool_t
svc_vc_recv(SVCXPRT *xprt, struct rpc_msg *msg,
    struct sockaddr **addrp, struct mbuf **mp)
{
	struct cf_conn *cd = (struct cf_conn *) xprt->xp_p1;
	struct uio uio;
	struct mbuf *m;
	struct socket* so = xprt->xp_socket;
	XDR xdrs;
	int error, rcvflag;
	uint32_t xid_plus_direction[2];

	/*
	 * Serialise access to the socket and our own record parsing
	 * state.
	 */
	sx_xlock(&xprt->xp_lock);

	for (;;) {
		/* If we have no request ready, check pending queue. */
		while (cd->mpending &&
		    (cd->mreq == NULL || cd->resid != 0 || !cd->eor)) {
			if (!svc_vc_process_pending(xprt))
				break;
		}

		/* Process and return complete request in cd->mreq. */
		if (cd->mreq != NULL && cd->resid == 0 && cd->eor) {

			/*
			 * Now, check for a backchannel reply.
			 * The XID is in the first uint32_t of the reply
			 * and the message direction is the second one.
			 */
			if ((cd->mreq->m_len >= sizeof(xid_plus_direction) ||
			    m_length(cd->mreq, NULL) >=
			    sizeof(xid_plus_direction)) &&
			    xprt->xp_p2 != NULL) {
				m_copydata(cd->mreq, 0,
				    sizeof(xid_plus_direction),
				    (char *)xid_plus_direction);
				xid_plus_direction[0] =
				    ntohl(xid_plus_direction[0]);
				xid_plus_direction[1] =
				    ntohl(xid_plus_direction[1]);
				/* Check message direction. */
				if (xid_plus_direction[1] == REPLY) {
					clnt_bck_svccall(xprt->xp_p2,
					    cd->mreq,
					    xid_plus_direction[0]);
					cd->mreq = NULL;
					continue;
				}
			}

			xdrmbuf_create(&xdrs, cd->mreq, XDR_DECODE);
			cd->mreq = NULL;

			/* Check for next request in a pending queue. */
			svc_vc_process_pending(xprt);
			if (cd->mreq == NULL || cd->resid != 0) {
				SOCKBUF_LOCK(&so->so_rcv);
				if (!soreadable(so))
					xprt_inactive_self(xprt);
				SOCKBUF_UNLOCK(&so->so_rcv);
			}

			sx_xunlock(&xprt->xp_lock);

			if (! xdr_callmsg(&xdrs, msg)) {
				XDR_DESTROY(&xdrs);
				return (FALSE);
			}

			*addrp = NULL;
			*mp = xdrmbuf_getall(&xdrs);
			XDR_DESTROY(&xdrs);

			return (TRUE);
		}

		/*
		 * The socket upcall calls xprt_active() which will eventually
		 * cause the server to call us here. We attempt to
		 * read as much as possible from the socket and put
		 * the result in cd->mpending. If the read fails,
		 * we have drained both cd->mpending and the socket so
		 * we can call xprt_inactive().
		 */
		uio.uio_resid = 1000000000;
		uio.uio_td = curthread;
		m = NULL;
		rcvflag = MSG_DONTWAIT;
		error = soreceive(so, NULL, &uio, &m, NULL, &rcvflag);

		if (error == EWOULDBLOCK) {
			/*
			 * We must re-test for readability after
			 * taking the lock to protect us in the case
			 * where a new packet arrives on the socket
			 * after our call to soreceive fails with
			 * EWOULDBLOCK.
			 */
			SOCKBUF_LOCK(&so->so_rcv);
			if (!soreadable(so))
				xprt_inactive_self(xprt);
			SOCKBUF_UNLOCK(&so->so_rcv);
			sx_xunlock(&xprt->xp_lock);
			return (FALSE);
		}

		if (error) {
			SOCKBUF_LOCK(&so->so_rcv);
			if (xprt->xp_upcallset) {
				xprt->xp_upcallset = 0;
				soupcall_clear(so, SO_RCV);
			}
			SOCKBUF_UNLOCK(&so->so_rcv);
			xprt_inactive_self(xprt);
			cd->strm_stat = XPRT_DIED;
			sx_xunlock(&xprt->xp_lock);
			return (FALSE);
		}

		if (!m) {
			/*
			 * EOF - the other end has closed the socket.
			 */
			xprt_inactive_self(xprt);
			cd->strm_stat = XPRT_DIED;
			sx_xunlock(&xprt->xp_lock);
			return (FALSE);
		}

		if (cd->mpending)
			m_last(cd->mpending)->m_next = m;
		else
			cd->mpending = m;
	}
}
Example #21
0
File: sfxge.c Project: AhmadTux/freebsd
static int
sfxge_if_ioctl(struct ifnet *ifp, unsigned long command, caddr_t data)
{
	struct sfxge_softc *sc;
	struct ifreq *ifr;
	int error;

	ifr = (struct ifreq *)data;
	sc = ifp->if_softc;
	error = 0;

	switch (command) {
	case SIOCSIFFLAGS:
		sx_xlock(&sc->softc_lock);
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				if ((ifp->if_flags ^ sc->if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					sfxge_mac_filter_set(sc);
				}
			} else
				sfxge_start(sc);
		} else
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				sfxge_stop(sc);
		sc->if_flags = ifp->if_flags;
		sx_xunlock(&sc->softc_lock);
		break;
	case SIOCSIFMTU:
		if (ifr->ifr_mtu == ifp->if_mtu) {
			/* Nothing to do */
			error = 0;
		} else if (ifr->ifr_mtu > SFXGE_MAX_MTU) {
			error = EINVAL;
		} else if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
			ifp->if_mtu = ifr->ifr_mtu;
			error = 0;
		} else {
			/* Restart required */
			sx_xlock(&sc->softc_lock);
			sfxge_stop(sc);
			ifp->if_mtu = ifr->ifr_mtu;
			error = sfxge_start(sc);
			sx_xunlock(&sc->softc_lock);
			if (error) {
				ifp->if_flags &= ~IFF_UP;
				ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
				if_down(ifp);
			}
		}
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			sfxge_mac_filter_set(sc);
		break;
	case SIOCSIFCAP:
		sx_xlock(&sc->softc_lock);

		/*
		 * The networking core already rejects attempts to
		 * enable capabilities we don't have.  We still have
		 * to reject attempts to disable capabilities that we
		 * can't (yet) disable.
		 */
		if (~ifr->ifr_reqcap & SFXGE_CAP_FIXED) {
			error = EINVAL;
			sx_xunlock(&sc->softc_lock);
			break;
		}

		ifp->if_capenable = ifr->ifr_reqcap;
		if (ifp->if_capenable & IFCAP_TXCSUM)
			ifp->if_hwassist |= (CSUM_IP | CSUM_TCP | CSUM_UDP);
		else
			ifp->if_hwassist &= ~(CSUM_IP | CSUM_TCP | CSUM_UDP);
		if (ifp->if_capenable & IFCAP_TSO)
			ifp->if_hwassist |= CSUM_TSO;
		else
			ifp->if_hwassist &= ~CSUM_TSO;

		sx_xunlock(&sc->softc_lock);
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
	}

	return (error);
}
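
The SIOCSIFMTU case above is reached through an ordinary ioctl(2) on any socket; a hedged userland sketch (set_mtu() is illustrative):

#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <net/if.h>
#include <string.h>
#include <unistd.h>

static int
set_mtu(const char *ifname, int mtu)
{
	struct ifreq ifr;
	int s, error;

	s = socket(AF_INET, SOCK_DGRAM, 0);
	if (s == -1)
		return (-1);
	memset(&ifr, 0, sizeof(ifr));
	strlcpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name));
	ifr.ifr_mtu = mtu;
	error = ioctl(s, SIOCSIFMTU, &ifr);
	close(s);
	return (error);
}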
Example #22
0
void
sbunlock(struct sockbuf *sb)
{

	sx_xunlock(&sb->sb_sx);
}
Example #23
0
/* System calls. */
int
sys_shm_open(struct thread *td, struct shm_open_args *uap)
{
	struct filedesc *fdp;
	struct shmfd *shmfd;
	struct file *fp;
	char *path;
	Fnv32_t fnv;
	mode_t cmode;
	int fd, error;

#ifdef CAPABILITY_MODE
	/*
	 * shm_open(2) is only allowed for anonymous objects.
	 */
	if (IN_CAPABILITY_MODE(td) && (uap->path != SHM_ANON))
		return (ECAPMODE);
#endif

	if ((uap->flags & O_ACCMODE) != O_RDONLY &&
	    (uap->flags & O_ACCMODE) != O_RDWR)
		return (EINVAL);

	if ((uap->flags & ~(O_ACCMODE | O_CREAT | O_EXCL | O_TRUNC)) != 0)
		return (EINVAL);

	fdp = td->td_proc->p_fd;
	cmode = (uap->mode & ~fdp->fd_cmask) & ACCESSPERMS;

	error = falloc(td, &fp, &fd, 0);
	if (error)
		return (error);

	/* A SHM_ANON path pointer creates an anonymous object. */
	if (uap->path == SHM_ANON) {
		/* A read-only anonymous object is pointless. */
		if ((uap->flags & O_ACCMODE) == O_RDONLY) {
			fdclose(fdp, fp, fd, td);
			fdrop(fp, td);
			return (EINVAL);
		}
		shmfd = shm_alloc(td->td_ucred, cmode);
	} else {
		path = malloc(MAXPATHLEN, M_SHMFD, M_WAITOK);
		error = copyinstr(uap->path, path, MAXPATHLEN, NULL);

		/* Require paths to start with a '/' character. */
		if (error == 0 && path[0] != '/')
			error = EINVAL;
		if (error) {
			fdclose(fdp, fp, fd, td);
			fdrop(fp, td);
			free(path, M_SHMFD);
			return (error);
		}

		fnv = fnv_32_str(path, FNV1_32_INIT);
		sx_xlock(&shm_dict_lock);
		shmfd = shm_lookup(path, fnv);
		if (shmfd == NULL) {
			/* Object does not yet exist, create it if requested. */
			if (uap->flags & O_CREAT) {
#ifdef MAC
				error = mac_posixshm_check_create(td->td_ucred,
				    path);
				if (error == 0) {
#endif
					shmfd = shm_alloc(td->td_ucred, cmode);
					shm_insert(path, fnv, shmfd);
#ifdef MAC
				}
#endif
			} else {
				free(path, M_SHMFD);
				error = ENOENT;
			}
		} else {
			/*
			 * Object already exists, obtain a new
			 * reference if requested and permitted.
			 */
			free(path, M_SHMFD);
			if ((uap->flags & (O_CREAT | O_EXCL)) ==
			    (O_CREAT | O_EXCL))
				error = EEXIST;
			else {
#ifdef MAC
				error = mac_posixshm_check_open(td->td_ucred,
				    shmfd, FFLAGS(uap->flags & O_ACCMODE));
				if (error == 0)
#endif
				error = shm_access(shmfd, td->td_ucred,
				    FFLAGS(uap->flags & O_ACCMODE));
			}

			/*
			 * Truncate the file back to zero length if
			 * O_TRUNC was specified and the object was
			 * opened with read/write.
			 */
			if (error == 0 &&
			    (uap->flags & (O_ACCMODE | O_TRUNC)) ==
			    (O_RDWR | O_TRUNC)) {
#ifdef MAC
				error = mac_posixshm_check_truncate(
					td->td_ucred, fp->f_cred, shmfd);
				if (error == 0)
#endif
					shm_dotruncate(shmfd, 0);
			}
			if (error == 0)
				shm_hold(shmfd);
		}
		sx_xunlock(&shm_dict_lock);

		if (error) {
			fdclose(fdp, fp, fd, td);
			fdrop(fp, td);
			return (error);
		}
	}

	finit(fp, FFLAGS(uap->flags & O_ACCMODE), DTYPE_SHM, shmfd, &shm_ops);

	FILEDESC_XLOCK(fdp);
	if (fdp->fd_ofiles[fd] == fp)
		fdp->fd_ofileflags[fd] |= UF_EXCLOSE;
	FILEDESC_XUNLOCK(fdp);
	td->td_retval[0] = fd;
	fdrop(fp, td);

	return (0);
}
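
sys_shm_open() implements shm_open(2); the usual consumer creates the object, sizes it with ftruncate(2), and maps it. A minimal sketch, with an illustrative name and size:

#include <sys/mman.h>
#include <fcntl.h>
#include <unistd.h>

int
main(void)
{
	int fd = shm_open("/example_shm", O_CREAT | O_RDWR, 0600);
	char *p;

	if (fd == -1)
		return (1);
	if (ftruncate(fd, 4096) == -1)	/* objects start at size 0 */
		return (1);
	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		return (1);
	p[0] = 42;
	munmap(p, 4096);
	close(fd);
	shm_unlink("/example_shm");
	return (0);
}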
Example #24
0
/* ARGSUSED */
int
memrw(struct cdev *dev, struct uio *uio, int flags)
{
	int o;
	u_int c = 0;
	vm_paddr_t pa;
	struct iovec *iov;
	int error = 0;
	vm_offset_t addr;

	/* XXX UPS Why ? */
	GIANT_REQUIRED;

	if (dev2unit(dev) != CDEV_MINOR_MEM && dev2unit(dev) != CDEV_MINOR_KMEM)
		return (EIO);

	if (dev2unit(dev) == CDEV_MINOR_KMEM && uio->uio_resid > 0) {
		if (uio->uio_offset < (vm_offset_t)VADDR(PTDPTDI, 0))
			return (EFAULT);

		if (!kernacc((caddr_t)(int)uio->uio_offset, uio->uio_resid,
		    uio->uio_rw == UIO_READ ?  VM_PROT_READ : VM_PROT_WRITE))
			return (EFAULT);
	}

	while (uio->uio_resid > 0 && error == 0) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			if (uio->uio_iovcnt < 0)
				panic("memrw");
			continue;
		}
		if (dev2unit(dev) == CDEV_MINOR_MEM) {
			pa = uio->uio_offset;
			pa &= ~PAGE_MASK;
		} else {
			/*
			 * Extract the physical page since the mapping may
			 * change at any time. This avoids panics on page 
			 * fault in this case but will cause reading/writing
			 * to the wrong page.
			 * Hopefully an application will notice the wrong
			 * data on read access and refrain from writing.
			 * This should be replaced by a special uiomove
			 * type function that just returns an error if there
			 * is a page fault on a kernel page. 
			 */
			addr = trunc_page(uio->uio_offset);
			pa = pmap_extract(kernel_pmap, addr);
			if (pa == 0)
				return (EFAULT);
		}

		/* 
		 * XXX UPS This should just use sf_buf_alloc.
		 * Unfortunately sf_buf_alloc needs a vm_page
		 * and we may want to look at memory not covered
		 * by the page array.
		 */

		sx_xlock(&memsxlock);
		pmap_kenter((vm_offset_t)ptvmmap, pa);
		pmap_invalidate_page(kernel_pmap,(vm_offset_t)ptvmmap);

		o = (int)uio->uio_offset & PAGE_MASK;
		c = PAGE_SIZE - o;
		c = min(c, (u_int)iov->iov_len);
		error = uiomove((caddr_t)&ptvmmap[o], (int)c, uio);
		pmap_qremove((vm_offset_t)ptvmmap, 1);
		sx_xunlock(&memsxlock);
	}

	return (error);
}
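
memrw() backs /dev/mem and /dev/kmem; the /dev/mem side can be exercised from userland with an ordinary pread(2). A hedged sketch: it needs root, and the physical address (the PC BIOS ROM area) is only an example:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	uint8_t buf[16];
	int fd, i;

	fd = open("/dev/mem", O_RDONLY);
	if (fd == -1)
		return (1);
	if (pread(fd, buf, sizeof(buf), 0xf0000) != sizeof(buf))
		return (1);
	for (i = 0; i < (int)sizeof(buf); i++)
		printf("%02x ", buf[i]);
	printf("\n");
	close(fd);
	return (0);
}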
Example #25
0
int
i915_reset(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	/*
	 * We really should only reset the display subsystem if we actually
	 * need to
	 */
	bool need_display = true;
	int ret;

	if (!i915_try_reset)
		return (0);

	if (!sx_try_xlock(&dev->dev_struct_lock))
		return (-EBUSY);

	i915_gem_reset(dev);

	ret = -ENODEV;
	if (time_second - dev_priv->last_gpu_reset < 5) {
		DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
	} else
		ret = intel_gpu_reset(dev);

	dev_priv->last_gpu_reset = time_second;
	if (ret) {
		DRM_ERROR("Failed to reset chip.\n");
		DRM_UNLOCK(dev);
		return (ret);
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET) ||
	    !dev_priv->mm.suspended) {
		dev_priv->mm.suspended = 0;

		i915_gem_init_swizzling(dev);

		dev_priv->rings[RCS].init(&dev_priv->rings[RCS]);
		if (HAS_BSD(dev))
			dev_priv->rings[VCS].init(&dev_priv->rings[VCS]);
		if (HAS_BLT(dev))
			dev_priv->rings[BCS].init(&dev_priv->rings[BCS]);

		i915_gem_context_init(dev);
		i915_gem_init_ppgtt(dev);

		drm_irq_uninstall(dev);
		drm_mode_config_reset(dev);
		DRM_UNLOCK(dev);
		drm_irq_install(dev);
		DRM_LOCK(dev);
	}
	DRM_UNLOCK(dev);

	if (need_display) {
		sx_xlock(&dev->mode_config.mutex);
		drm_helper_resume_force_mode(dev);
		sx_xunlock(&dev->mode_config.mutex);
	}

	return (0);
}
Example #26
0
/**
 * radeon_fence_wait_seq - wait for a specific sequence number
 *
 * @rdev: radeon device pointer
 * @target_seq: sequence number we want to wait for
 * @ring: ring index the fence is associated with
 * @intr: use interruptible sleep
 * @lock_ring: whether the ring should be locked or not
 *
 * Wait for the requested sequence number to be written (all asics).
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the sequence number.  Helper function
 * for radeon_fence_wait(), et al.
 * Returns 0 if the sequence number has passed, error for all other cases.
 * -EDEADLK is returned when a GPU lockup has been detected and the ring is
 * marked as not ready so no further jobs get scheduled until a successful
 * reset.
 */
static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 target_seq,
                                 unsigned ring, bool intr, bool lock_ring)
{
    unsigned long timeout, last_activity;
    uint64_t seq;
    unsigned i;
    bool signaled, fence_queue_locked;
    int r;

    while (target_seq > atomic_load_acq_64(&rdev->fence_drv[ring].last_seq)) {
        if (!rdev->ring[ring].ready) {
            return -EBUSY;
        }

        timeout = jiffies - RADEON_FENCE_JIFFIES_TIMEOUT;
        if (time_after(rdev->fence_drv[ring].last_activity, timeout)) {
            /* the normal case, timeout is somewhere before last_activity */
            timeout = rdev->fence_drv[ring].last_activity - timeout;
        } else {
            /* either jiffies wrapped around, or no fence was signaled in the
             * last 500ms; either way, wait the minimum amount and then check
             * for a lockup
             */
            timeout = 1;
        }
        seq = atomic_load_acq_64(&rdev->fence_drv[ring].last_seq);
        /* Save current last activity value, used to check for GPU lockups */
        last_activity = rdev->fence_drv[ring].last_activity;

        CTR2(KTR_DRM, "radeon fence: wait begin (ring=%d, seq=%d)",
             ring, seq);

        radeon_irq_kms_sw_irq_get(rdev, ring);
        fence_queue_locked = false;
        r = 0;
        while (!(signaled = radeon_fence_seq_signaled(rdev,
                            target_seq, ring))) {
            if (!fence_queue_locked) {
                mtx_lock(&rdev->fence_queue_mtx);
                fence_queue_locked = true;
            }
            if (intr) {
                r = cv_timedwait_sig(&rdev->fence_queue,
                                     &rdev->fence_queue_mtx,
                                     timeout);
            } else {
                r = cv_timedwait(&rdev->fence_queue,
                                 &rdev->fence_queue_mtx,
                                 timeout);
            }
            if (r == EINTR)
                r = ERESTARTSYS;
            if (r != 0) {
                if (r == EWOULDBLOCK) {
                    signaled =
                        radeon_fence_seq_signaled(
                            rdev, target_seq, ring);
                }
                break;
            }
        }
        if (fence_queue_locked) {
            mtx_unlock(&rdev->fence_queue_mtx);
        }
        radeon_irq_kms_sw_irq_put(rdev, ring);
        if (unlikely(r == ERESTARTSYS)) {
            return -r;
        }
        CTR2(KTR_DRM, "radeon fence: wait end (ring=%d, seq=%d)",
             ring, seq);

        if (unlikely(!signaled)) {
#ifndef __FreeBSD__
            /* we were interrupted for some reason and fence
             * isn't signaled yet, resume waiting */
            if (r) {
                continue;
            }
#endif

            /* check if sequence value has changed since last_activity */
            if (seq != atomic_load_acq_64(&rdev->fence_drv[ring].last_seq)) {
                continue;
            }

            if (lock_ring) {
                sx_xlock(&rdev->ring_lock);
            }

            /* test if somebody else has already decided that this is a lockup */
            if (last_activity != rdev->fence_drv[ring].last_activity) {
                if (lock_ring) {
                    sx_xunlock(&rdev->ring_lock);
                }
                continue;
            }

            if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
                /* good news we believe it's a lockup */
                dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016jx last fence id 0x%016jx)\n",
                         (uintmax_t)target_seq, (uintmax_t)seq);

                /* change last activity so nobody else thinks there is a lockup */
                for (i = 0; i < RADEON_NUM_RINGS; ++i) {
                    rdev->fence_drv[i].last_activity = jiffies;
                }

                /* mark the ring as not ready any more */
                rdev->ring[ring].ready = false;
                if (lock_ring) {
                    sx_xunlock(&rdev->ring_lock);
                }
                return -EDEADLK;
            }

            if (lock_ring) {
                sx_xunlock(&rdev->ring_lock);
            }
        }
    }
    return 0;
}
Example #27
0
static int
icl_listen_add_tcp(struct icl_listen *il, int domain, int socktype,
    int protocol, struct sockaddr *sa, int portal_id)
{
	struct icl_listen_sock *ils;
	struct socket *so;
	struct sockopt sopt;
	int error, one = 1;

	error = socreate(domain, &so, socktype, protocol,
	    curthread->td_ucred, curthread);
	if (error != 0) {
		ICL_WARN("socreate failed with error %d", error);
		return (error);
	}

	sopt.sopt_dir = SOPT_SET;
	sopt.sopt_level = SOL_SOCKET;
	sopt.sopt_name = SO_REUSEADDR;
	sopt.sopt_val = &one;
	sopt.sopt_valsize = sizeof(one);
	sopt.sopt_td = NULL;
	error = sosetopt(so, &sopt);
	if (error != 0) {
		ICL_WARN("failed to set SO_REUSEADDR with error %d", error);
		soclose(so);
		return (error);
	}

	error = sobind(so, sa, curthread);
	if (error != 0) {
		ICL_WARN("sobind failed with error %d", error);
		soclose(so);
		return (error);
	}

	error = solisten(so, -1, curthread);
	if (error != 0) {
		ICL_WARN("solisten failed with error %d", error);
		soclose(so);
		return (error);
	}

	ils = malloc(sizeof(*ils), M_ICL_PROXY, M_ZERO | M_WAITOK);
	ils->ils_listen = il;
	ils->ils_socket = so;
	ils->ils_id = portal_id;

	error = kthread_add(icl_accept_thread, ils, NULL, NULL, 0, 0, "iclacc");
	if (error != 0) {
		ICL_WARN("kthread_add failed with error %d", error);
		soclose(so);
		free(ils, M_ICL_PROXY);

		return (error);
	}

	sx_xlock(&il->il_lock);
	TAILQ_INSERT_TAIL(&il->il_sockets, ils, ils_next);
	sx_xunlock(&il->il_lock);

	return (0);
}
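
The final three lines above show the common sx-protected list pattern: il_lock serialises updates of il_sockets. A hedged, generic sketch of the same shape, with all names illustrative:

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/sx.h>
#include <sys/queue.h>

struct item {
	TAILQ_ENTRY(item)	i_next;
};

struct registry {
	struct sx		r_lock;
	TAILQ_HEAD(, item)	r_items;
};

static void
registry_init(struct registry *r)
{

	sx_init(&r->r_lock, "registry lock");
	TAILQ_INIT(&r->r_items);
}

static void
registry_insert(struct registry *r, struct item *it)
{

	sx_xlock(&r->r_lock);
	TAILQ_INSERT_TAIL(&r->r_items, it, i_next);
	sx_xunlock(&r->r_lock);
}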
static int
xenbus_resume(device_t dev)
{
	device_t *kids;
	struct xenbus_device_ivars *ivars;
	int i, count, error;
	char *statepath;

	xb_init_comms();
	xs_resume();

	/*
	 * We must re-examine each device and find the new path for
	 * its backend.
	 */
	if (device_get_children(dev, &kids, &count) == 0) {
		for (i = 0; i < count; i++) {
			if (device_get_state(kids[i]) == DS_NOTPRESENT)
				continue;

			ivars = device_get_ivars(kids[i]);

			unregister_xenbus_watch(
				&ivars->xd_otherend_watch);
			ivars->xd_state = XenbusStateInitialising;

			/*
			 * Find the new backend details and
			 * re-register our watch.
			 */
			free(ivars->xd_otherend_path, M_DEVBUF);
			error = xenbus_gather(XBT_NIL, ivars->xd_node,
			    "backend-id", "%i", &ivars->xd_otherend_id,
			    "backend", NULL, &ivars->xd_otherend_path,
			    NULL);
			if (error)
				return (error);

			DEVICE_RESUME(kids[i]);

			statepath = malloc(strlen(ivars->xd_otherend_path)
			    + strlen("/state") + 1, M_DEVBUF, M_WAITOK);
			sprintf(statepath, "%s/state", ivars->xd_otherend_path);

			free(ivars->xd_otherend_watch.node, M_DEVBUF);
			ivars->xd_otherend_watch.node = statepath;
			register_xenbus_watch(
				&ivars->xd_otherend_watch);

#if 0
			/*
			 * Can't do this yet since we are running in
			 * the xenwatch thread and if we sleep here,
			 * we will stop delivering watch notifications
			 * and the device will never come back online.
			 */
			sx_xlock(&ivars->xd_lock);
			while (ivars->xd_state != XenbusStateClosed
			    && ivars->xd_state != XenbusStateConnected)
				sx_sleep(&ivars->xd_state, &ivars->xd_lock,
				    0, "xdresume", 0);
			sx_xunlock(&ivars->xd_lock);
#endif
		}
		free(kids, M_TEMP);
	}

	return (0);
}