Example #1
static void
sfxge_stop(struct sfxge_softc *sc)
{
	sx_assert(&sc->softc_lock, LA_XLOCKED);

	if (sc->init_state != SFXGE_STARTED)
		return;

	sc->init_state = SFXGE_REGISTERED;

	/* Stop the port. */
	sfxge_port_stop(sc);

	/* Stop the transmitter. */
	sfxge_tx_stop(sc);

	/* Stop the receiver. */
	sfxge_rx_stop(sc);

	/* Stop processing events. */
	sfxge_ev_stop(sc);

	/* Stop processing interrupts. */
	sfxge_intr_stop(sc);

	efx_nic_fini(sc->enp);

	sc->ifnet->if_drv_flags &= ~IFF_DRV_RUNNING;
}
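
All of the examples on this page assert ownership of an sx(9) shared/exclusive lock before touching the state it protects. As a reference point, here is a minimal sketch of that pattern; the struct and function names are hypothetical, and only the sx(9) calls themselves are real kernel API:

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/sx.h>

struct my_softc {
	struct sx	sc_lock;
	int		sc_state;
};

static void
my_softc_init(struct my_softc *sc)
{

	sx_init(&sc->sc_lock, "my_softc lock");
}

static void
my_softc_update(struct my_softc *sc)
{

	/*
	 * Document the locking contract: the caller must hold sc_lock
	 * exclusively.  On kernels built with INVARIANTS this panics if
	 * violated; otherwise it compiles away.
	 */
	sx_assert(&sc->sc_lock, SA_XLOCKED);
	sc->sc_state++;
}

static void
my_softc_fini(struct my_softc *sc)
{

	sx_assert(&sc->sc_lock, SA_UNLOCKED);
	sx_destroy(&sc->sc_lock);
}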
Example #2
struct proc *
proc_realparent(struct proc *child)
{
	struct proc *p, *parent;

	sx_assert(&proctree_lock, SX_LOCKED);
	if ((child->p_treeflag & P_TREE_ORPHANED) == 0) {
		if (child->p_oppid == 0 ||
		    child->p_pptr->p_pid == child->p_oppid)
			parent = child->p_pptr;
		else
			parent = initproc;
		return (parent);
	}
	for (p = child; (p->p_treeflag & P_TREE_FIRST_ORPHAN) == 0;) {
		/* Cannot use LIST_PREV(), since the list head is not known. */
		p = __containerof(p->p_orphan.le_prev, struct proc,
		    p_orphan.le_next);
		KASSERT((p->p_treeflag & P_TREE_ORPHANED) != 0,
		    ("missing P_ORPHAN %p", p));
	}
	parent = __containerof(p->p_orphan.le_prev, struct proc,
	    p_orphans.lh_first);
	return (parent);
}
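
The backward walk in proc_realparent() works because of how LIST_ENTRY links are laid out: an entry's le_prev points at the previous element's le_next field (or at the head's lh_first for the first element), so __containerof() can recover the enclosing structure without ever knowing where the list head is. A hypothetical illustration of the idiom:

#include <sys/param.h>
#include <sys/queue.h>

struct item {
	int		val;
	LIST_ENTRY(item) link;		/* le_next / le_prev pair */
};

/*
 * Recover the element preceding 'it' on its list.  Assumes 'it' is not
 * the first element; in that case le_prev points into the list head
 * itself, which is exactly the terminating case proc_realparent()
 * detects with P_TREE_FIRST_ORPHAN above.
 */
static inline struct item *
item_prev(struct item *it)
{

	return (__containerof(it->link.le_prev, struct item, link.le_next));
}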
Example #3
static void
dircache_update(struct pefs_dircache_entry *pde, int onlist)
{
	struct pefs_dircache *pd = pde->pde_dircache;

	sx_assert(&pd->pd_lock, SA_XLOCKED);

	if ((pd->pd_flags & PD_UPDATING) != 0) {
		PEFSDEBUG("pefs_dircache_update: %s -> %s\n",
		    pde->pde_name, pde->pde_encname);
		pde->pde_gen = pd->pd_gen;
		if (onlist != 0)
			LIST_REMOVE(pde, pde_dir_entry);
		LIST_INSERT_HEAD(DIRCACHE_ACTIVEHEAD(pd), pde, pde_dir_entry);
	} else if (pd->pd_gen == 0 || pd->pd_gen != pde->pde_gen) {
		PEFSDEBUG("pefs_dircache: inconsistent cache: "
		    "gen=%ld old_gen=%ld name=%s\n",
		    pd->pd_gen, pde->pde_gen, pde->pde_name);
		dircache_expire(pd);
		pde->pde_gen = 0;
		if (onlist == 0)
			LIST_INSERT_HEAD(DIRCACHE_STALEHEAD(pd), pde,
			    pde_dir_entry);
	}
}
Example #4
/*
 * Close out the log.
 */
static void
filemon_close_log(struct filemon *filemon)
{
	struct file *fp;
	struct timeval now;
	size_t len;

	sx_assert(&filemon->lock, SA_XLOCKED);
	if (filemon->fp == NULL)
		return;

	getmicrotime(&now);

	len = snprintf(filemon->msgbufr,
	    sizeof(filemon->msgbufr),
	    "# Stop %ju.%06ju\n# Bye bye\n",
	    (uintmax_t)now.tv_sec, (uintmax_t)now.tv_usec);

	filemon_output(filemon, filemon->msgbufr, len);
	fp = filemon->fp;
	filemon->fp = NULL;

	sx_xunlock(&filemon->lock);
	fdrop(fp, curthread);
	sx_xlock(&filemon->lock);
}
Example #5
int
pmclog_configure_log(struct pmc_mdep *md, struct pmc_owner *po, int logfd)
{
	struct proc *p;
	cap_rights_t rights;
	int error;

	sx_assert(&pmc_sx, SA_XLOCKED);
	PMCDBG2(LOG,CFG,1, "config po=%p logfd=%d", po, logfd);

	p = po->po_owner;

	/* return EBUSY if a log file was already present */
	if (po->po_flags & PMC_PO_OWNS_LOGFILE)
		return (EBUSY);

	KASSERT(po->po_file == NULL,
	    ("[pmclog,%d] po=%p file (%p) already present", __LINE__, po,
		po->po_file));

	/* get a reference to the file state */
	error = fget_write(curthread, logfd,
	    cap_rights_init(&rights, CAP_WRITE), &po->po_file);
	if (error)
		goto error;

	/* mark process as owning a log file */
	po->po_flags |= PMC_PO_OWNS_LOGFILE;

	/* mark process as using HWPMCs */
	PROC_LOCK(p);
	p->p_flag |= P_HWPMC;
	PROC_UNLOCK(p);

	/* create a log initialization entry */
	PMCLOG_RESERVE_WITH_ERROR(po, INITIALIZE,
	    sizeof(struct pmclog_initialize));
	PMCLOG_EMIT32(PMC_VERSION);
	PMCLOG_EMIT32(md->pmd_cputype);
	PMCLOG_DESPATCH(po);

	return (0);

 error:
	KASSERT(po->po_kthread == NULL, ("[pmclog,%d] po=%p kthread not "
	    "stopped", __LINE__, po));

	if (po->po_file)
		(void) fdrop(po->po_file, curthread);
	po->po_file  = NULL;	/* clear file and error state */
	po->po_error = 0;
	po->po_flags &= ~PMC_PO_OWNS_LOGFILE;

	return (error);
}
Example #6
/* Attach the filemon to the process. */
static int
filemon_attach_proc(struct filemon *filemon, struct proc *p)
{
	struct filemon *filemon2;

	sx_assert(&filemon->lock, SA_XLOCKED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT((p->p_flag & P_WEXIT) == 0,
	    ("%s: filemon %p attaching to exiting process %p",
	    __func__, filemon, p));
	KASSERT((p->p_flag & P_INEXEC) == 0,
	    ("%s: filemon %p attaching to execing process %p",
	    __func__, filemon, p));

	if (p->p_filemon == filemon)
		return (0);
	/*
	 * Don't allow truncating other process traces.  It is
	 * not really intended to trace procs other than curproc
	 * anyhow.
	 */
	if (p->p_filemon != NULL && p != curproc)
		return (EBUSY);
	/*
	 * Historic behavior of filemon has been to let a child initiate
	 * tracing on itself and cease existing tracing.  Bmake
	 * .META + .MAKE relies on this.  It is only relevant for attaching to
	 * curproc.
	 */
	while (p->p_filemon != NULL) {
		PROC_UNLOCK(p);
		sx_xunlock(&filemon->lock);
		while ((filemon2 = filemon_proc_get(p)) != NULL) {
			/* It may have changed. */
			if (p->p_filemon == filemon2)
				filemon_proc_drop(p);
			filemon_drop(filemon2);
		}
		sx_xlock(&filemon->lock);
		PROC_LOCK(p);
		/*
		 * It may have been attached to, though unlikely.
		 * Try again if needed.
		 */
	}

	KASSERT(p->p_filemon == NULL,
	    ("%s: proc %p didn't detach filemon %p", __func__, p,
	    p->p_filemon));
	p->p_filemon = filemon_acquire(filemon);
	++filemon->proccnt;

	return (0);
}
Example #7
static int
ffs_susp_suspended(struct mount *mp)
{
	struct ufsmount *ump;

	sx_assert(&ffs_susp_lock, SA_LOCKED);

	ump = VFSTOUFS(mp);
	if (ump->um_writesuspended)
		return (1);
	return (0);
}
Example #8
void
pefs_dircache_abortupdate(struct pefs_dircache *pd)
{
	sx_assert(&pd->pd_lock, SA_XLOCKED);

	if ((pd->pd_flags & PD_UPDATING) != 0) {
		PEFSDEBUG("pefs_dircache_abortupdate: gen=%lu %p\n",
		    pd->pd_gen, pd);
		dircache_expire(pd);
		pd->pd_flags &= ~PD_UPDATING;
	}
	DIRCACHE_ASSERT(pd);
}
Example #9
struct pefs_dircache_entry *
pefs_dircache_insert(struct pefs_dircache *pd, struct pefs_tkey *ptk,
    char const *name, size_t name_len,
    char const *encname, size_t encname_len)
{
	struct pefs_dircache_listhead *head;
	struct pefs_dircache_entry *pde;

	MPASS(ptk->ptk_key != NULL);
	sx_assert(&pd->pd_lock, SA_XLOCKED);

	if (name_len == 0 || name_len >= sizeof(pde->pde_name) ||
	    encname_len == 0 || encname_len >= sizeof(pde->pde_encname))
		panic("pefs: invalid file name length: %zd/%zd",
		    name_len, encname_len);

	pde = uma_zalloc(dircache_entry_zone, M_WAITOK | M_ZERO);
	pde->pde_dircache = pd;

	pde->pde_tkey = *ptk;
	pefs_key_ref(pde->pde_tkey.ptk_key);

	pde->pde_namelen = name_len;
	memcpy(pde->pde_name, name, name_len);
	pde->pde_name[name_len] = '\0';
	pde->pde_namehash = dircache_hashname(pd, pde->pde_name,
	    pde->pde_namelen);

	pde->pde_encnamelen = encname_len;
	memcpy(pde->pde_encname, encname, encname_len);
	pde->pde_encname[encname_len] = '\0';
	pde->pde_encnamehash = dircache_hashname(pd, pde->pde_encname,
	    pde->pde_encnamelen);

	/* Insert into list and set pde_gen */
	dircache_update(pde, 0);

	mtx_lock(&dircache_mtx);
	head = &dircache_tbl[pde->pde_namehash & pefs_dircache_hashmask];
	LIST_INSERT_HEAD(head, pde, pde_hash_entry);
	head = &dircache_enctbl[pde->pde_encnamehash & pefs_dircache_hashmask];
	LIST_INSERT_HEAD(head, pde, pde_enchash_entry);
	dircache_entries++;
	mtx_unlock(&dircache_mtx);

	PEFSDEBUG("pefs_dircache_insert: hash=%x enchash=%x: %s -> %s\n",
	    pde->pde_namehash, pde->pde_encnamehash,
	    pde->pde_name, pde->pde_encname);

	return (pde);
}
Example #10
uintptr_t
unlock_sx(struct lock_object *lock)
{
	struct sx *sx;

	sx = (struct sx *)lock;
	sx_assert(sx, SA_LOCKED | SA_NOTRECURSED);
	if (sx_xlocked(sx)) {
		sx_xunlock(sx);
		return (0);
	} else {
		sx_sunlock(sx);
		return (1);
	}
}
Example #11
static void
clear_orphan(struct proc *p)
{
	struct proc *p1;

	sx_assert(&proctree_lock, SA_XLOCKED);
	if ((p->p_treeflag & P_TREE_ORPHANED) == 0)
		return;
	if ((p->p_treeflag & P_TREE_FIRST_ORPHAN) != 0) {
		p1 = LIST_NEXT(p, p_orphan);
		if (p1 != NULL)
			p1->p_treeflag |= P_TREE_FIRST_ORPHAN;
		p->p_treeflag &= ~P_TREE_FIRST_ORPHAN;
	}
	LIST_REMOVE(p, p_orphan);
	p->p_treeflag &= ~P_TREE_ORPHANED;
}
Example #12
/*
 * Iterate through directory entries
 */
static int
pfs_iterate(struct thread *td, struct proc *proc, struct pfs_node *pd,
	    struct pfs_node **pn, struct proc **p)
{
	int visible;

	sx_assert(&allproc_lock, SX_SLOCKED);
	pfs_assert_owned(pd);
 again:
	if (*pn == NULL) {
		/* first node */
		*pn = pd->pn_nodes;
	} else if ((*pn)->pn_type != pfstype_procdir) {
		/* next node */
		*pn = (*pn)->pn_next;
	}
	if (*pn != NULL && (*pn)->pn_type == pfstype_procdir) {
		/* next process */
		if (*p == NULL)
			*p = LIST_FIRST(&allproc);
		else
			*p = LIST_NEXT(*p, p_list);
		/* out of processes: next node */
		if (*p == NULL)
			*pn = (*pn)->pn_next;
		else
			PROC_LOCK(*p);
	}

	if ((*pn) == NULL)
		return (-1);

	if (*p != NULL) {
		visible = pfs_visible_proc(td, *pn, *p);
		PROC_UNLOCK(*p);
	} else if (proc != NULL) {
		visible = pfs_visible_proc(td, *pn, proc);
	} else {
		visible = 1;
	}
	if (!visible)
		goto again;

	return (0);
}
Example #13
/*
 * Release a reference and free on the last one.
 */
static void
filemon_release(struct filemon *filemon)
{

	if (refcount_release(&filemon->refcnt) == 0)
		return;
	/*
	 * There are valid cases of releasing while locked, such as in
	 * filemon_untrack_processes, but none which are done where there
	 * is not at least 1 reference remaining.
	 */
	sx_assert(&filemon->lock, SA_UNLOCKED);

	if (filemon->cred != NULL)
		crfree(filemon->cred);
	sx_destroy(&filemon->lock);
	free(filemon, M_FILEMON);
}
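
For context on the early return above: refcount_release(9) returns non-zero only when it drops the final reference. A minimal sketch of the idiom, with a hypothetical object and malloc type:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/refcount.h>

static MALLOC_DEFINE(M_MYOBJ, "myobj", "example refcounted object");

struct myobj {
	u_int	refcnt;			/* managed with refcount(9) */
};

static void
myobj_release(struct myobj *o)
{

	if (refcount_release(&o->refcnt) == 0)
		return;			/* other references remain */
	free(o, M_MYOBJ);		/* last reference: tear down */
}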
Example #14
void
pefs_dircache_endupdate(struct pefs_dircache *pd)
{
	struct pefs_dircache_entry *pde;

	sx_assert(&pd->pd_lock, SA_XLOCKED);

	if ((pd->pd_flags & PD_UPDATING) == 0) {
		DIRCACHE_ASSERT(pd);
		return;
	}

	while (!LIST_EMPTY(DIRCACHE_STALEHEAD(pd))) {
		pde = LIST_FIRST(DIRCACHE_STALEHEAD(pd));
		dircache_entry_free(pde);
	}
	pd->pd_flags &= ~PD_UPDATING;
}
Example #15
/*
 * Locate an attribute given a name and mountpoint.
 * Must be holding uepm lock for the mount point.
 */
static struct ufs_extattr_list_entry *
ufs_extattr_find_attr(struct ufsmount *ump, int attrnamespace,
    const char *attrname)
{
	struct ufs_extattr_list_entry *search_attribute;

	sx_assert(&ump->um_extattr.uepm_lock, SA_XLOCKED);

	for (search_attribute = LIST_FIRST(&ump->um_extattr.uepm_list);
	    search_attribute != NULL;
	    search_attribute = LIST_NEXT(search_attribute, uele_entries)) {
		if (!(strncmp(attrname, search_attribute->uele_attrname,
		    UFS_EXTATTR_MAXEXTATTRNAME)) &&
		    (attrnamespace == search_attribute->uele_attrnamespace)) {
			return (search_attribute);
		}
	}

	return (NULL);
}
Example #16
/*
 * Invalidate the passed filemon in all processes.
 */
static void
filemon_untrack_processes(struct filemon *filemon)
{
	struct proc *p;

	sx_assert(&filemon->lock, SA_XLOCKED);

	/* Avoid allproc loop if there is no need. */
	if (filemon->proccnt == 0)
		return;

	/*
	 * Processes in this list won't go away while here since
	 * filemon_event_process_exit() will lock on filemon->lock
	 * which we hold.
	 */
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		/*
		 * No PROC_LOCK is needed to compare here since it is
		 * guaranteed to not change since we have its filemon
		 * locked.  Everything that changes this p_filemon will
		 * be locked on it.
		 */
		if (p->p_filemon == filemon)
			filemon_proc_drop(p);
	}
	sx_sunlock(&allproc_lock);

	/*
	 * It's possible some references were acquired but will be
	 * dropped shortly as they are restricted from being
	 * inherited.  There is at least the reference in cdevpriv remaining.
	 */
	KASSERT(filemon->refcnt > 0, ("%s: filemon %p should have "
	    "references still.", __func__, filemon));
	KASSERT(filemon->proccnt == 0, ("%s: filemon %p should not have "
	    "attached procs still.", __func__, filemon));
}
Example #17
/* Remove and release the filemon on the given process. */
static void
filemon_proc_drop(struct proc *p)
{
	struct filemon *filemon;

	KASSERT(p->p_filemon != NULL, ("%s: proc %p NULL p_filemon",
	    __func__, p));
	sx_assert(&p->p_filemon->lock, SA_XLOCKED);
	PROC_LOCK(p);
	filemon = p->p_filemon;
	p->p_filemon = NULL;
	--filemon->proccnt;
	PROC_UNLOCK(p);
	/*
	 * This should not be the last reference yet.  filemon_release()
	 * cannot be called with filemon locked, which the caller expects
	 * will stay locked.
	 */
	KASSERT(filemon->refcnt > 1, ("%s: proc %p dropping filemon %p "
	    "with last reference", __func__, p, filemon));
	filemon_release(filemon);
}
Example #18
void
assert_sx(const struct lock_object *lock, int what)
{

	sx_assert((const struct sx *)lock, what);
}
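
assert_sx() is the sx hook for the kernel's generic lock-class machinery, alongside unlock_sx() from Example #10: code such as the sleepqueue/condvar layer uses a struct lock_class to operate on arbitrary lock types. A condensed sketch of how the sx class is wired together (abridged; the exact field set and flags live in sys/lock.h and kern_sx.c and vary by FreeBSD version):

struct lock_class lock_class_sx = {
	.lc_name = "sx",
	.lc_flags = LC_SLEEPLOCK | LC_SLEEPABLE | LC_RECURSABLE |
	    LC_UPGRADABLE,
	.lc_assert = assert_sx,
	.lc_lock = lock_sx,		/* counterpart to unlock_sx() */
	.lc_unlock = unlock_sx,
	/* lc_ddb_show, lc_owner, ... omitted */
};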
Example #19
static void
do_fork(struct thread *td, struct fork_req *fr, struct proc *p2,
    struct thread *td2, struct vmspace *vm2, struct file *fp_procdesc)
{
	struct proc *p1, *pptr;
	int trypid;
	struct filedesc *fd;
	struct filedesc_to_leader *fdtol;
	struct sigacts *newsigacts;

	sx_assert(&proctree_lock, SX_SLOCKED);
	sx_assert(&allproc_lock, SX_XLOCKED);

	p1 = td->td_proc;

	trypid = fork_findpid(fr->fr_flags);

	sx_sunlock(&proctree_lock);

	p2->p_state = PRS_NEW;		/* protect against others */
	p2->p_pid = trypid;
	AUDIT_ARG_PID(p2->p_pid);
	LIST_INSERT_HEAD(&allproc, p2, p_list);
	allproc_gen++;
	LIST_INSERT_HEAD(PIDHASH(p2->p_pid), p2, p_hash);
	tidhash_add(td2);
	PROC_LOCK(p2);
	PROC_LOCK(p1);

	sx_xunlock(&allproc_lock);

	bcopy(&p1->p_startcopy, &p2->p_startcopy,
	    __rangeof(struct proc, p_startcopy, p_endcopy));
	pargs_hold(p2->p_args);

	PROC_UNLOCK(p1);

	bzero(&p2->p_startzero,
	    __rangeof(struct proc, p_startzero, p_endzero));

	/* Tell the prison that we exist. */
	prison_proc_hold(p2->p_ucred->cr_prison);

	PROC_UNLOCK(p2);

	/*
	 * Malloc things while we don't hold any locks.
	 */
	if (fr->fr_flags & RFSIGSHARE)
		newsigacts = NULL;
	else
		newsigacts = sigacts_alloc();

	/*
	 * Copy filedesc.
	 */
	if (fr->fr_flags & RFCFDG) {
		fd = fdinit(p1->p_fd, false);
		fdtol = NULL;
	} else if (fr->fr_flags & RFFDG) {
		fd = fdcopy(p1->p_fd);
		fdtol = NULL;
	} else {
		fd = fdshare(p1->p_fd);
		if (p1->p_fdtol == NULL)
			p1->p_fdtol = filedesc_to_leader_alloc(NULL, NULL,
			    p1->p_leader);
		if ((fr->fr_flags & RFTHREAD) != 0) {
			/*
			 * Shared file descriptor table, and shared
			 * process leaders.
			 */
			fdtol = p1->p_fdtol;
			FILEDESC_XLOCK(p1->p_fd);
			fdtol->fdl_refcount++;
			FILEDESC_XUNLOCK(p1->p_fd);
		} else {
			/* 
			 * Shared file descriptor table, and different
			 * process leaders.
			 */
			fdtol = filedesc_to_leader_alloc(p1->p_fdtol,
			    p1->p_fd, p2);
		}
	}
	/*
	 * Make a proc table entry for the new process.
	 * Start by zeroing the section of proc that is zero-initialized,
	 * then copy the section that is copied directly from the parent.
	 */

	PROC_LOCK(p2);
	PROC_LOCK(p1);

	bzero(&td2->td_startzero,
	    __rangeof(struct thread, td_startzero, td_endzero));

	bcopy(&td->td_startcopy, &td2->td_startcopy,
	    __rangeof(struct thread, td_startcopy, td_endcopy));

	bcopy(&p2->p_comm, &td2->td_name, sizeof(td2->td_name));
	td2->td_sigstk = td->td_sigstk;
	td2->td_flags = TDF_INMEM;
	td2->td_lend_user_pri = PRI_MAX;

#ifdef VIMAGE
	td2->td_vnet = NULL;
	td2->td_vnet_lpush = NULL;
#endif

	/*
	 * Allow the scheduler to initialize the child.
	 */
	thread_lock(td);
	sched_fork(td, td2);
	thread_unlock(td);

	/*
	 * Duplicate sub-structures as needed.
	 * Increase reference counts on shared objects.
	 */
	p2->p_flag = P_INMEM;
	p2->p_flag2 = p1->p_flag2 & (P2_NOTRACE | P2_NOTRACE_EXEC | P2_TRAPCAP);
	p2->p_swtick = ticks;
	if (p1->p_flag & P_PROFIL)
		startprofclock(p2);

	/*
	 * Whilst the proc lock is held, copy the VM domain data out
	 * using the VM domain method.
	 */
	vm_domain_policy_init(&p2->p_vm_dom_policy);
	vm_domain_policy_localcopy(&p2->p_vm_dom_policy,
	    &p1->p_vm_dom_policy);

	if (fr->fr_flags & RFSIGSHARE) {
		p2->p_sigacts = sigacts_hold(p1->p_sigacts);
	} else {
		sigacts_copy(newsigacts, p1->p_sigacts);
		p2->p_sigacts = newsigacts;
	}

	if (fr->fr_flags & RFTSIGZMB)
	        p2->p_sigparent = RFTSIGNUM(fr->fr_flags);
	else if (fr->fr_flags & RFLINUXTHPN)
	        p2->p_sigparent = SIGUSR1;
	else
	        p2->p_sigparent = SIGCHLD;

	p2->p_textvp = p1->p_textvp;
	p2->p_fd = fd;
	p2->p_fdtol = fdtol;

	if (p1->p_flag2 & P2_INHERIT_PROTECTED) {
		p2->p_flag |= P_PROTECTED;
		p2->p_flag2 |= P2_INHERIT_PROTECTED;
	}

	/*
	 * p_limit is copy-on-write.  Bump its refcount.
	 */
	lim_fork(p1, p2);

	thread_cow_get_proc(td2, p2);

	pstats_fork(p1->p_stats, p2->p_stats);

	PROC_UNLOCK(p1);
	PROC_UNLOCK(p2);

	/* Bump references to the text vnode (for procfs). */
	if (p2->p_textvp)
		vrefact(p2->p_textvp);

	/*
	 * Set up linkage for kernel based threading.
	 */
	if ((fr->fr_flags & RFTHREAD) != 0) {
		mtx_lock(&ppeers_lock);
		p2->p_peers = p1->p_peers;
		p1->p_peers = p2;
		p2->p_leader = p1->p_leader;
		mtx_unlock(&ppeers_lock);
		PROC_LOCK(p1->p_leader);
		if ((p1->p_leader->p_flag & P_WEXIT) != 0) {
			PROC_UNLOCK(p1->p_leader);
			/*
			 * The task leader is exiting, so process p1 is
			 * going to be killed shortly.  Since p1 obviously
			 * isn't dead yet, we know that the leader is either
			 * sending SIGKILL's to all the processes in this
			 * task or is sleeping waiting for all the peers to
			 * exit.  We let p1 complete the fork, but we need
			 * to go ahead and kill the new process p2 since
			 * the task leader may not get a chance to send
			 * SIGKILL to it.  We leave it on the list so that
			 * the task leader will wait for this new process
			 * to commit suicide.
			 */
			PROC_LOCK(p2);
			kern_psignal(p2, SIGKILL);
			PROC_UNLOCK(p2);
		} else
			PROC_UNLOCK(p1->p_leader);
	} else {
		p2->p_peers = NULL;
		p2->p_leader = p2;
	}

	sx_xlock(&proctree_lock);
	PGRP_LOCK(p1->p_pgrp);
	PROC_LOCK(p2);
	PROC_LOCK(p1);

	/*
	 * Preserve some more flags in subprocess.  P_PROFIL has already
	 * been preserved.
	 */
	p2->p_flag |= p1->p_flag & P_SUGID;
	td2->td_pflags |= (td->td_pflags & TDP_ALTSTACK) | TDP_FORKING;
	SESS_LOCK(p1->p_session);
	if (p1->p_session->s_ttyvp != NULL && p1->p_flag & P_CONTROLT)
		p2->p_flag |= P_CONTROLT;
	SESS_UNLOCK(p1->p_session);
	if (fr->fr_flags & RFPPWAIT)
		p2->p_flag |= P_PPWAIT;

	p2->p_pgrp = p1->p_pgrp;
	LIST_INSERT_AFTER(p1, p2, p_pglist);
	PGRP_UNLOCK(p1->p_pgrp);
	LIST_INIT(&p2->p_children);
	LIST_INIT(&p2->p_orphans);

	callout_init_mtx(&p2->p_itcallout, &p2->p_mtx, 0);

	/*
	 * If PF_FORK is set, the child process inherits the
	 * procfs ioctl flags from its parent.
	 */
	if (p1->p_pfsflags & PF_FORK) {
		p2->p_stops = p1->p_stops;
		p2->p_pfsflags = p1->p_pfsflags;
	}

	/*
	 * This begins the section where we must prevent the parent
	 * from being swapped.
	 */
	_PHOLD(p1);
	PROC_UNLOCK(p1);

	/*
	 * Attach the new process to its parent.
	 *
	 * If RFNOWAIT is set, the newly created process becomes a child
	 * of init.  This effectively disassociates the child from the
	 * parent.
	 */
	if ((fr->fr_flags & RFNOWAIT) != 0) {
		pptr = p1->p_reaper;
		p2->p_reaper = pptr;
	} else {
		p2->p_reaper = (p1->p_treeflag & P_TREE_REAPER) != 0 ?
		    p1 : p1->p_reaper;
		pptr = p1;
	}
	p2->p_pptr = pptr;
	LIST_INSERT_HEAD(&pptr->p_children, p2, p_sibling);
	LIST_INIT(&p2->p_reaplist);
	LIST_INSERT_HEAD(&p2->p_reaper->p_reaplist, p2, p_reapsibling);
	if (p2->p_reaper == p1)
		p2->p_reapsubtree = p2->p_pid;
	sx_xunlock(&proctree_lock);

	/* Inform accounting that we have forked. */
	p2->p_acflag = AFORK;
	PROC_UNLOCK(p2);

#ifdef KTRACE
	ktrprocfork(p1, p2);
#endif

	/*
	 * Finish creating the child process.  It will return via a different
	 * execution path later.  (ie: directly into user mode)
	 */
	vm_forkproc(td, p2, td2, vm2, fr->fr_flags);

	if (fr->fr_flags == (RFFDG | RFPROC)) {
		VM_CNT_INC(v_forks);
		VM_CNT_ADD(v_forkpages, p2->p_vmspace->vm_dsize +
		    p2->p_vmspace->vm_ssize);
	} else if (fr->fr_flags == (RFFDG | RFPROC | RFPPWAIT | RFMEM)) {
		VM_CNT_INC(v_vforks);
		VM_CNT_ADD(v_vforkpages, p2->p_vmspace->vm_dsize +
		    p2->p_vmspace->vm_ssize);
	} else if (p1 == &proc0) {
		VM_CNT_INC(v_kthreads);
		VM_CNT_ADD(v_kthreadpages, p2->p_vmspace->vm_dsize +
		    p2->p_vmspace->vm_ssize);
	} else {
		VM_CNT_INC(v_rforks);
		VM_CNT_ADD(v_rforkpages, p2->p_vmspace->vm_dsize +
		    p2->p_vmspace->vm_ssize);
	}

	/*
	 * Associate the process descriptor with the process before anything
	 * can happen that might cause that process to need the descriptor.
	 * However, don't do this until after fork(2) can no longer fail.
	 */
	if (fr->fr_flags & RFPROCDESC)
		procdesc_new(p2, fr->fr_pd_flags);

	/*
	 * Both processes are set up, now check if any loadable modules want
	 * to adjust anything.
	 */
	EVENTHANDLER_INVOKE(process_fork, p1, p2, fr->fr_flags);

	/*
	 * Set the child start time and mark the process as being complete.
	 */
	PROC_LOCK(p2);
	PROC_LOCK(p1);
	microuptime(&p2->p_stats->p_start);
	PROC_SLOCK(p2);
	p2->p_state = PRS_NORMAL;
	PROC_SUNLOCK(p2);

#ifdef KDTRACE_HOOKS
	/*
	 * Tell the DTrace fasttrap provider about the new process so that any
	 * tracepoints inherited from the parent can be removed. We have to do
	 * this only after p_state is PRS_NORMAL since the fasttrap module will
	 * use pfind() later on.
	 */
	if ((fr->fr_flags & RFMEM) == 0 && dtrace_fasttrap_fork)
		dtrace_fasttrap_fork(p1, p2);
#endif
	/*
	 * Hold the process so that it cannot exit after we make it runnable,
	 * but before we wait for the debugger.
	 */
	_PHOLD(p2);
	if (p1->p_ptevents & PTRACE_FORK) {
		/*
		 * Arrange for debugger to receive the fork event.
		 *
		 * We can report PL_FLAG_FORKED regardless of
		 * P_FOLLOWFORK settings, but it does not make a sense
		 * for runaway child.
		 */
		td->td_dbgflags |= TDB_FORK;
		td->td_dbg_forked = p2->p_pid;
		td2->td_dbgflags |= TDB_STOPATFORK;
	}
	if (fr->fr_flags & RFPPWAIT) {
		td->td_pflags |= TDP_RFPPWAIT;
		td->td_rfppwait_p = p2;
		td->td_dbgflags |= TDB_VFORK;
	}
	PROC_UNLOCK(p2);

	/*
	 * Now can be swapped.
	 */
	_PRELE(p1);
	PROC_UNLOCK(p1);

	/*
	 * Tell any interested parties about the new process.
	 */
	knote_fork(p1->p_klist, p2->p_pid);
	SDT_PROBE3(proc, , , create, p2, p1, fr->fr_flags);

	if (fr->fr_flags & RFPROCDESC) {
		procdesc_finit(p2->p_procdesc, fp_procdesc);
		fdrop(fp_procdesc, td);
	}

	if ((fr->fr_flags & RFSTOPPED) == 0) {
		/*
		 * If RFSTOPPED not requested, make child runnable and
		 * add to run queue.
		 */
		thread_lock(td2);
		TD_SET_CAN_RUN(td2);
		sched_add(td2, SRQ_BORING);
		thread_unlock(td2);
		if (fr->fr_pidp != NULL)
			*fr->fr_pidp = p2->p_pid;
	} else {
		*fr->fr_procp = p2;
	}

	PROC_LOCK(p2);
	/*
	 * Wait until debugger is attached to child.
	 */
	while (td2->td_proc == p2 && (td2->td_dbgflags & TDB_STOPATFORK) != 0)
		cv_wait(&p2->p_dbgwait, &p2->p_mtx);
	_PRELE(p2);
	racct_proc_fork_done(p2);
	PROC_UNLOCK(p2);
}
Example #20
static int
fork_findpid(int flags)
{
	struct proc *p;
	int trypid;
	static int pidchecked = 0;

	/*
	 * Requires allproc_lock in order to iterate over the list
	 * of processes, and proctree_lock to access p_pgrp.
	 */
	sx_assert(&allproc_lock, SX_LOCKED);
	sx_assert(&proctree_lock, SX_LOCKED);

	/*
	 * Find an unused process ID.  We remember a range of unused IDs
	 * ready to use (from lastpid+1 through pidchecked-1).
	 *
	 * If RFHIGHPID is set (used during system boot), do not allocate
	 * low-numbered pids.
	 */
	trypid = lastpid + 1;
	if (flags & RFHIGHPID) {
		if (trypid < 10)
			trypid = 10;
	} else {
		if (randompid)
			trypid += arc4random() % randompid;
	}
retry:
	/*
	 * If the process ID prototype has wrapped around,
	 * restart somewhat above 0, as the low-numbered procs
	 * tend to include daemons that don't exit.
	 */
	if (trypid >= pid_max) {
		trypid = trypid % pid_max;
		if (trypid < 100)
			trypid += 100;
		pidchecked = 0;
	}
	if (trypid >= pidchecked) {
		int doingzomb = 0;

		pidchecked = PID_MAX;
		/*
		 * Scan the active and zombie procs to check whether this pid
		 * is in use.  Remember the lowest pid that's greater
		 * than trypid, so we can avoid checking for a while.
		 *
		 * Avoid reuse of the process group id, session id or
		 * the reaper subtree id.  Note that for process group
		 * and sessions, the amount of reserved pids is
		 * limited by process limit.  For the subtree ids, the
		 * id is kept reserved only while there is a
		 * non-reaped process in the subtree, so amount of
		 * reserved pids is limited by process limit times
		 * two.
		 */
		p = LIST_FIRST(&allproc);
again:
		for (; p != NULL; p = LIST_NEXT(p, p_list)) {
			while (p->p_pid == trypid ||
			    p->p_reapsubtree == trypid ||
			    (p->p_pgrp != NULL &&
			    (p->p_pgrp->pg_id == trypid ||
			    (p->p_session != NULL &&
			    p->p_session->s_sid == trypid)))) {
				trypid++;
				if (trypid >= pidchecked)
					goto retry;
			}
			if (p->p_pid > trypid && pidchecked > p->p_pid)
				pidchecked = p->p_pid;
			if (p->p_pgrp != NULL) {
				if (p->p_pgrp->pg_id > trypid &&
				    pidchecked > p->p_pgrp->pg_id)
					pidchecked = p->p_pgrp->pg_id;
				if (p->p_session != NULL &&
				    p->p_session->s_sid > trypid &&
				    pidchecked > p->p_session->s_sid)
					pidchecked = p->p_session->s_sid;
			}
		}
		if (!doingzomb) {
			doingzomb = 1;
			p = LIST_FIRST(&zombproc);
			goto again;
		}
	}

	/*
	 * RFHIGHPID does not mess with the lastpid counter during boot.
	 */
	if (flags & RFHIGHPID)
		pidchecked = 0;
	else
		lastpid = trypid;

	return (trypid);
}
Example #21
int
pmclog_configure_log(struct pmc_mdep *md, struct pmc_owner *po, int logfd)
{
	int error;
	struct proc *p;

	/*
	 * As long as it is possible to get a LOR between pmc_sx lock and
	 * proctree/allproc sx locks used for adding a new process, assure
	 * the former is not held here.
	 */
	sx_assert(&pmc_sx, SA_UNLOCKED);
	PMCDBG(LOG,CFG,1, "config po=%p logfd=%d", po, logfd);

	p = po->po_owner;

	/* return EBUSY if a log file was already present */
	if (po->po_flags & PMC_PO_OWNS_LOGFILE)
		return (EBUSY);

	KASSERT(po->po_kthread == NULL,
	    ("[pmclog,%d] po=%p kthread (%p) already present", __LINE__, po,
		po->po_kthread));
	KASSERT(po->po_file == NULL,
	    ("[pmclog,%d] po=%p file (%p) already present", __LINE__, po,
		po->po_file));

	/* get a reference to the file state */
	error = fget_write(curthread, logfd, CAP_WRITE, &po->po_file);
	if (error)
		goto error;

	/* mark process as owning a log file */
	po->po_flags |= PMC_PO_OWNS_LOGFILE;
	error = kproc_create(pmclog_loop, po, &po->po_kthread,
	    RFHIGHPID, 0, "hwpmc: proc(%d)", p->p_pid);
	if (error)
		goto error;

	/* mark process as using HWPMCs */
	PROC_LOCK(p);
	p->p_flag |= P_HWPMC;
	PROC_UNLOCK(p);

	/* create a log initialization entry */
	PMCLOG_RESERVE_WITH_ERROR(po, INITIALIZE,
	    sizeof(struct pmclog_initialize));
	PMCLOG_EMIT32(PMC_VERSION);
	PMCLOG_EMIT32(md->pmd_cputype);
	PMCLOG_DESPATCH(po);

	return (0);

 error:
	/* shutdown the thread */
	if (po->po_kthread)
		pmclog_stop_kthread(po);

	KASSERT(po->po_kthread == NULL, ("[pmclog,%d] po=%p kthread not "
	    "stopped", __LINE__, po));

	if (po->po_file)
		(void) fdrop(po->po_file, curthread);
	po->po_file  = NULL;	/* clear file and error state */
	po->po_error = 0;

	return (error);
}
Example #22
static int
sfxge_start(struct sfxge_softc *sc)
{
	int rc;

	sx_assert(&sc->softc_lock, LA_XLOCKED);

	if (sc->init_state == SFXGE_STARTED)
		return (0);

	if (sc->init_state != SFXGE_REGISTERED) {
		rc = EINVAL;
		goto fail;
	}

	if ((rc = efx_nic_init(sc->enp)) != 0)
		goto fail;

	/* Start processing interrupts. */
	if ((rc = sfxge_intr_start(sc)) != 0)
		goto fail2;

	/* Start processing events. */
	if ((rc = sfxge_ev_start(sc)) != 0)
		goto fail3;

	/* Start the receiver side. */
	if ((rc = sfxge_rx_start(sc)) != 0)
		goto fail4;

	/* Start the transmitter side. */
	if ((rc = sfxge_tx_start(sc)) != 0)
		goto fail5;

	/* Fire up the port. */
	if ((rc = sfxge_port_start(sc)) != 0)
		goto fail6;

	sc->init_state = SFXGE_STARTED;

	/* Tell the stack we're running. */
	sc->ifnet->if_drv_flags |= IFF_DRV_RUNNING;
	sc->ifnet->if_drv_flags &= ~IFF_DRV_OACTIVE;

	return (0);

fail6:
	sfxge_tx_stop(sc);

fail5:
	sfxge_rx_stop(sc);

fail4:
	sfxge_ev_stop(sc);

fail3:
	sfxge_intr_stop(sc);

fail2:
	efx_nic_fini(sc->enp);

fail:
	device_printf(sc->dev, "sfxge_start: %d\n", rc);

	return (rc);
}
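
The fail/fail2/.../fail6 labels in sfxge_start() are the classic cascading-goto cleanup idiom: each failure jumps past its own (never-acquired) resource and unwinds everything acquired before it, in reverse order. A self-contained sketch with hypothetical step functions:

#include <errno.h>

struct dev {
	int a_on, b_on;
};

static int  step_a(struct dev *d) { d->a_on = 1; return (0); }
static void step_a_undo(struct dev *d) { d->a_on = 0; }
static int  step_b(struct dev *d) { d->b_on = 1; return (0); }
static void step_b_undo(struct dev *d) { d->b_on = 0; }
static int  step_c(struct dev *d) { (void)d; return (EIO); }

static int
start_all(struct dev *d)
{
	int rc;

	if ((rc = step_a(d)) != 0)
		goto fail;
	if ((rc = step_b(d)) != 0)
		goto fail2;
	if ((rc = step_c(d)) != 0)	/* fails in this sketch */
		goto fail3;
	return (0);

fail3:
	step_b_undo(d);			/* undo in reverse order */
fail2:
	step_a_undo(d);
fail:
	return (rc);
}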