Code example #1
File: subr_smp.c  Project: AhmadTux/freebsd
/*
 * Initialize 'child' as a leaf covering 'count' CPUs starting at 'start',
 * link it beneath 'parent', and fold its mask and count into every
 * ancestor, panicking if the new CPUs already appear in an ancestor's
 * mask (duplicate children).  Returns the next unassigned CPU id.
 */
static int
smp_topo_addleaf(struct cpu_group *parent, struct cpu_group *child, int share,
    int count, int flags, int start)
{
	char cpusetbuf[CPUSETBUFSIZ], cpusetbuf2[CPUSETBUFSIZ];
	cpuset_t mask;
	int i;

	CPU_ZERO(&mask);
	for (i = 0; i < count; i++, start++)
		CPU_SET(start, &mask);
	child->cg_parent = parent;
	child->cg_child = NULL;
	child->cg_children = 0;
	child->cg_level = share;
	child->cg_count = count;
	child->cg_flags = flags;
	child->cg_mask = mask;
	parent->cg_children++;
	for (; parent != NULL; parent = parent->cg_parent) {
		if (CPU_OVERLAP(&parent->cg_mask, &child->cg_mask))
			panic("Duplicate children in %p.  mask (%s) child (%s)",
			    parent,
			    cpusetobj_strprint(cpusetbuf, &parent->cg_mask),
			    cpusetobj_strprint(cpusetbuf2, &child->cg_mask));
		CPU_OR(&parent->cg_mask, &child->cg_mask);
		parent->cg_count += child->cg_count;
	}

	return (start);
}
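For orientation, here is a hypothetical userland sketch (not part of
subr_smp.c) of the two operations the function leans on: the
CPU_ZERO()/CPU_SET() loop that builds a contiguous mask, and the
CPU_OVERLAP() test the ancestor walk uses to detect duplicate children.
It assumes the CPU_* macros from <sys/cpuset.h> are visible to userland,
as on FreeBSD.

#include <sys/param.h>
#include <sys/cpuset.h>
#include <stdio.h>

int
main(void)
{
	cpuset_t a, b;
	int i, start;

	/* Same pattern as the leaf loop: 'count' CPUs starting at 'start'. */
	CPU_ZERO(&a);
	for (i = 0, start = 0; i < 4; i++, start++)
		CPU_SET(start, &a);		/* a = { 0, 1, 2, 3 } */

	CPU_ZERO(&b);
	for (i = 0, start = 2; i < 4; i++, start++)
		CPU_SET(start, &b);		/* b = { 2, 3, 4, 5 } */

	/* A nonzero overlap here is the condition smp_topo_addleaf() panics on. */
	printf("overlap: %d\n", CPU_OVERLAP(&a, &b) ? 1 : 0);
	return (0);
}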
Code example #2
File: subr_smp.c  Project: AhmadTux/freebsd
/*
 * Walk down from 'top' to the smallest group whose mask contains 'cpu',
 * returning NULL if no group in the topology covers it.
 */
struct cpu_group *
smp_topo_find(struct cpu_group *top, int cpu)
{
	struct cpu_group *cg;
	cpuset_t mask;
	int children;
	int i;

	CPU_SETOF(cpu, &mask);
	cg = top;
	for (;;) {
		if (!CPU_OVERLAP(&cg->cg_mask, &mask))
			return (NULL);
		if (cg->cg_children == 0)
			return (cg);
		children = cg->cg_children;
		for (i = 0, cg = cg->cg_child; i < children; cg++, i++)
			if (CPU_OVERLAP(&cg->cg_mask, &mask))
				break;
	}
	/* NOTREACHED */
	return (NULL);
}
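A hypothetical userland sketch of the mask that drives the descent above:
CPU_SETOF() yields a set holding exactly one CPU, so at each level of a
well-formed topology at most one child can overlap it.  Same assumption
as before about <sys/cpuset.h> being usable from userland.

#include <sys/param.h>
#include <sys/cpuset.h>
#include <assert.h>

int
main(void)
{
	cpuset_t one, many;

	CPU_SETOF(3, &one);	/* zero the set, then set bit 3: one = { 3 } */

	CPU_ZERO(&many);
	CPU_SET(2, &many);
	CPU_SET(3, &many);

	assert(CPU_OVERLAP(&many, &one));	/* CPU 3 is in both */
	assert(CPU_ISSET(3, &one) && !CPU_ISSET(2, &one));
	return (0);
}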
Code example #3
/*
 * Recursively check for errors that would occur from applying mask to
 * the tree of sets starting at 'set'.  Checks for sets that would become
 * empty as well as RDONLY flags.
 */
static int
cpuset_testupdate(struct cpuset *set, cpuset_t *mask)
{
	struct cpuset *nset;
	cpuset_t newmask;
	int error;

	mtx_assert(&cpuset_lock, MA_OWNED);
	if (set->cs_flags & CPU_SET_RDONLY)
		return (EPERM);
	if (!CPU_OVERLAP(&set->cs_mask, mask))
		return (EDEADLK);
	CPU_COPY(&set->cs_mask, &newmask);
	CPU_AND(&newmask, mask);
	error = 0;
	LIST_FOREACH(nset, &set->cs_children, cs_siblings) 
		if ((error = cpuset_testupdate(nset, &newmask)) != 0)
			break;
	return (error);
}
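A hypothetical userland sketch of the narrowing step in isolation: copy
the current mask, intersect it with the proposed one, and treat a
disjoint pair as the EDEADLK case.  It keeps the two-argument
CPU_AND(dst, src) form used in the example; newer FreeBSD headers
switched CPU_AND to a three-argument form, so this follows the older API.

#include <sys/param.h>
#include <sys/cpuset.h>
#include <errno.h>
#include <stdio.h>

/* Narrow 'cur' by 'mask' the way cpuset_testupdate() does, minus the tree. */
static int
narrow(cpuset_t *cur, const cpuset_t *mask)
{
	cpuset_t newmask;

	if (!CPU_OVERLAP(cur, mask))
		return (EDEADLK);	/* the intersection would be empty */
	CPU_COPY(cur, &newmask);
	CPU_AND(&newmask, mask);	/* newmask = *cur & *mask */
	CPU_COPY(&newmask, cur);
	return (0);
}

int
main(void)
{
	cpuset_t cur, mask;

	CPU_ZERO(&cur);
	CPU_SET(0, &cur);
	CPU_SET(1, &cur);
	CPU_ZERO(&mask);
	CPU_SET(1, &mask);
	CPU_SET(2, &mask);
	printf("ok: %d\n", narrow(&cur, &mask));	/* 0; cur is now { 1 } */

	CPU_ZERO(&mask);
	CPU_SET(3, &mask);
	printf("disjoint: %d\n", narrow(&cur, &mask));	/* EDEADLK */
	return (0);
}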
Code example #4
File: subr_smp.c  Project: AhmadTux/freebsd
/*
 * Called by a CPU to restart stopped CPUs. 
 *
 * Usually (but not necessarily) called with 'stopped_cpus' as its arg.
 *
 *  - Signals all CPUs in map to restart.
 *  - Waits for each to restart.
 *
 * Returns:
 *  -1: error
 *   0: NA
 *   1: ok
 */
int
restart_cpus(cpuset_t map)
{
#ifdef KTR
	char cpusetbuf[CPUSETBUFSIZ];
#endif

	if (!smp_started)
		return 0;

	CTR1(KTR_SMP, "restart_cpus(%s)", cpusetobj_strprint(cpusetbuf, &map));

	/* signal other cpus to restart */
	CPU_COPY_STORE_REL(&map, &started_cpus);

	/* wait for each to clear its bit */
	while (CPU_OVERLAP(&stopped_cpus, &map))
		cpu_spinwait();

	return 1;
}
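The stop/restart handshake above is a release/acquire pattern: the
restarting CPU publishes the map with a store-release
(CPU_COPY_STORE_REL) and then spins until every stopped CPU clears its
bit in stopped_cpus.  A hypothetical single-CPU analogy in portable C11
atomics (the names started/stopped are illustrative, not kernel API):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static _Atomic bool started;		/* analog of this CPU's bit in started_cpus */
static _Atomic bool stopped = true;	/* analog of its bit in stopped_cpus */

/* Stopped side: spin until signalled, then acknowledge by clearing the bit. */
static void *
stopped_cpu(void *arg)
{
	while (!atomic_load_explicit(&started, memory_order_acquire))
		;		/* analog of cpu_spinwait() */
	atomic_store_explicit(&stopped, false, memory_order_release);
	return (NULL);
}

int
main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, stopped_cpu, NULL);

	/* Restarting side: publish the signal, then wait for the ack. */
	atomic_store_explicit(&started, true, memory_order_release);
	while (atomic_load_explicit(&stopped, memory_order_acquire))
		;		/* analog of the CPU_OVERLAP() spin */

	pthread_join(t, NULL);
	printf("restarted\n");
	return (0);
}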
Code example #5
/*
 * Create a set in the space provided in 'set' with the provided parameters.
 * The set is returned with a single ref.  May return EDEADLK if the set
 * will have no valid cpu based on restrictions from the parent.
 */
static int
_cpuset_create(struct cpuset *set, struct cpuset *parent, const cpuset_t *mask,
    cpusetid_t id)
{

	if (!CPU_OVERLAP(&parent->cs_mask, mask))
		return (EDEADLK);
	CPU_COPY(mask, &set->cs_mask);
	LIST_INIT(&set->cs_children);
	refcount_init(&set->cs_ref, 1);
	set->cs_flags = 0;
	mtx_lock_spin(&cpuset_lock);
	CPU_AND(&set->cs_mask, &parent->cs_mask);
	set->cs_id = id;
	set->cs_parent = cpuset_ref(parent);
	LIST_INSERT_HEAD(&parent->cs_children, set, cs_siblings);
	if (set->cs_id != CPUSET_INVALID)
		LIST_INSERT_HEAD(&cpuset_ids, set, cs_link);
	mtx_unlock_spin(&cpuset_lock);

	return (0);
}
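Note the double intersection here: the unlocked CPU_OVERLAP() test
rejects an obviously empty result early with EDEADLK, while the
CPU_AND() performed under cpuset_lock is what actually clamps the new
set's mask to its parent at insertion time, so a parent mask that shrank
in the window between the two checks cannot leave the child with stray
CPUs.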
Code example #6
File: subr_smp.c  Project: cyrilmagsuci/freebsd
/*
 * Called by a CPU to restart stopped CPUs. 
 *
 * Usually (but not necessarily) called with 'stopped_cpus' as its arg.
 *
 *  - Signals all CPUs in map to restart.
 *  - Waits for each to restart.
 *
 * Returns:
 *  -1: error
 *   0: NA
 *   1: ok
 */
static int
generic_restart_cpus(cpuset_t map, u_int type)
{
#ifdef KTR
	char cpusetbuf[CPUSETBUFSIZ];
#endif
	volatile cpuset_t *cpus;

	KASSERT(
#if defined(__amd64__) || defined(__i386__)
	    type == IPI_STOP || type == IPI_STOP_HARD || type == IPI_SUSPEND,
#else
	    type == IPI_STOP || type == IPI_STOP_HARD,
#endif
	    ("%s: invalid stop type", __func__));

	if (!smp_started)
		return 0;

	CTR1(KTR_SMP, "restart_cpus(%s)", cpusetobj_strprint(cpusetbuf, &map));

#if defined(__amd64__) || defined(__i386__)
	if (type == IPI_SUSPEND)
		cpus = &suspended_cpus;
	else
#endif
		cpus = &stopped_cpus;

	/* signal other cpus to restart */
	CPU_COPY_STORE_REL(&map, &started_cpus);

	/* wait for each to clear its bit */
	while (CPU_OVERLAP(cpus, &map))
		cpu_spinwait();

	return 1;
}
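This generic variant differs from example #4 only in which set it polls:
on amd64/i386 an IPI_SUSPEND restart waits on suspended_cpus rather than
stopped_cpus, and the KASSERT rejects any other stop type up front.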
Code example #7
/*
 * Handle two cases for replacing the base set or mask of an entire process.
 *
 * 1) Set is non-null and mask is null.  This reparents all anonymous sets
 *    to the provided set and replaces all non-anonymous td_cpusets with the
 *    provided set.
 * 2) Mask is non-null and set is null.  This replaces or creates anonymous
 *    sets for every thread with the existing base as a parent.
 *
 * This is overly complicated because we can't allocate while holding a 
 * spinlock and spinlocks must be held while changing and examining thread
 * state.
 */
static int
cpuset_setproc(pid_t pid, struct cpuset *set, cpuset_t *mask)
{
	struct setlist freelist;
	struct setlist droplist;
	struct cpuset *tdset;
	struct cpuset *nset;
	struct thread *td;
	struct proc *p;
	int threads;
	int nfree;
	int error;
	/*
	 * The algorithm requires two passes due to locking considerations.
	 * 
	 * 1) Lookup the process and acquire the locks in the required order.
	 * 2) If enough cpusets have not been allocated release the locks and
	 *    allocate them.  Loop.
	 */
	LIST_INIT(&freelist);
	LIST_INIT(&droplist);
	nfree = 0;
	for (;;) {
		error = cpuset_which(CPU_WHICH_PID, pid, &p, &td, &nset);
		if (error)
			goto out;
		if (nfree >= p->p_numthreads)
			break;
		threads = p->p_numthreads;
		PROC_UNLOCK(p);
		for (; nfree < threads; nfree++) {
			nset = uma_zalloc(cpuset_zone, M_WAITOK);
			LIST_INSERT_HEAD(&freelist, nset, cs_link);
		}
	}
	PROC_LOCK_ASSERT(p, MA_OWNED);
	/*
	 * Now that the appropriate locks are held and we have enough cpusets,
	 * make sure the operation will succeed before applying changes.  The
	 * proc lock prevents td_cpuset from changing between calls.
	 */
	error = 0;
	FOREACH_THREAD_IN_PROC(p, td) {
		thread_lock(td);
		tdset = td->td_cpuset;
		/*
		 * Verify that a new mask doesn't specify cpus outside of
		 * the set the thread is a member of.
		 */
		if (mask) {
			if (tdset->cs_id == CPUSET_INVALID)
				tdset = tdset->cs_parent;
			if (!CPU_SUBSET(&tdset->cs_mask, mask))
				error = EDEADLK;
		/*
		 * Verify that a new set won't leave an existing thread
		 * mask without a cpu to run on.  It can, however, restrict
		 * the set.
		 */
		} else if (tdset->cs_id == CPUSET_INVALID) {
			if (!CPU_OVERLAP(&set->cs_mask, &tdset->cs_mask))
				error = EDEADLK;
		}
		thread_unlock(td);
		if (error)
			goto unlock_out;
	}