Example #1
/*
 * Remove a PG from a hwset
 */
static void
pghw_set_remove(group_t *hwset, pghw_t *pg)
{
	int result;

	result = group_remove(hwset, pg, GRP_RESIZE);
	ASSERT(result == 0);
}
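Usage note: across these examples, group_remove() is treated as returning 0 on success and -1 when the element is not a member (the ASSERT here, and the != -1 checks in Example #5, both rely on it). A minimal self-contained sketch of that convention, using a hypothetical array-backed group rather than the real illumos implementation:

#include <assert.h>
#include <stddef.h>

#define	GRP_RESIZE	0x1	/* hypothetical flag values; the real */
#define	GRP_NORESIZE	0x2	/* illumos constants may differ */

typedef struct {
	void	**g_elems;	/* element array */
	size_t	g_size;		/* number of occupied slots */
} group_t;

/*
 * Remove "elem" from "g", compacting the array.
 * Returns 0 on success, -1 if "elem" is not a member.
 */
static int
group_remove(group_t *g, void *elem, int flags)
{
	size_t i;

	(void)flags;		/* resizing policy ignored in this sketch */
	for (i = 0; i < g->g_size; i++) {
		if (g->g_elems[i] == elem) {
			/* Shift the tail down over the removed slot */
			for (; i + 1 < g->g_size; i++)
				g->g_elems[i] = g->g_elems[i + 1];
			g->g_size--;
			return (0);
		}
	}
	return (-1);		/* not found */
}

int
main(void)
{
	int a, b;
	void *elems[2] = { &a, &b };
	group_t g = { elems, 2 };

	assert(group_remove(&g, &a, GRP_RESIZE) == 0);
	assert(group_remove(&g, &a, GRP_RESIZE) == -1);	/* already gone */
	return (0);
}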
Example #2
void	CPHGeometryOwner::remove_geom( CODEGeom* g )
{
	VERIFY( b_builded );
	VERIFY( m_group );
	GEOM_I gi = std::find( m_geoms.begin(), m_geoms.end(), g );
	VERIFY( gi != m_geoms.end());
	//(*gi)->remove_from_space( m_group );
	group_remove( *g );
	m_geoms.erase( gi );
}
Example #3
File: skills.c  Project: rezalas/riftshadow
/* recursively removes a group given its number -- uses group_remove */
void gn_remove( CHAR_DATA *ch, int gn)
{
    int i;

    ch->pcdata->group_known[gn] = FALSE;

    for ( i = 0; i < MAX_IN_GROUP; i ++)
    {
	if (group_table[gn].spells[i] == NULL)
	    break;
	group_remove(ch,group_table[gn].spells[i]);
    }
}
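The recursion advertised in the comment is indirect: group_remove() (not shown) presumably looks each spells[] entry up and, when the entry names another group, calls back into gn_remove(). A self-contained toy model of that mutual recursion, with invented tables; note that group_known[gn] is cleared before descending, so even cyclic group definitions terminate:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define MAX_IN_GROUP	8
#define MAX_GROUP	3

/* Hypothetical miniature of the ROM-style skill-group tables */
struct group_type {
	const char *name;
	const char *spells[MAX_IN_GROUP];	/* NULL-terminated */
};

static const struct group_type group_table[MAX_GROUP] = {
	{ "rom basics",   { "magic missile", "attack", NULL } },
	{ "mage basics",  { "rom basics", "dagger", NULL } },	/* nested group */
	{ "mage default", { "mage basics", "lore", NULL } },
};

static bool group_known[MAX_GROUP];

static void gn_remove(int gn);

static int
group_lookup(const char *name)
{
	int gn;

	for (gn = 0; gn < MAX_GROUP; gn++)
		if (strcmp(group_table[gn].name, name) == 0)
			return gn;
	return -1;
}

/* Remove a skill or (recursively) a whole group by name */
static void
group_remove(const char *name)
{
	int gn = group_lookup(name);

	if (gn != -1 && group_known[gn]) {
		gn_remove(gn);		/* indirect recursion */
		return;
	}
	printf("forgot skill: %s\n", name);
}

/* Mirrors the gn_remove() above: clear the flag, then walk spells[] */
static void
gn_remove(int gn)
{
	int i;

	group_known[gn] = false;	/* cleared first, so cycles terminate */
	for (i = 0; i < MAX_IN_GROUP; i++) {
		if (group_table[gn].spells[i] == NULL)
			break;
		group_remove(group_table[gn].spells[i]);
	}
}

int
main(void)
{
	group_known[0] = group_known[1] = group_known[2] = true;
	gn_remove(2);			/* removes the whole nested tree */
	return 0;
}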
Example #4
/*
 * Class callback when a CPU is leaving the system (deletion)
 *
 * "pgdata" is a reference to the CPU's PG data to be deconstructed.
 *
 * cp->cpu_pg is used by the dispatcher to access the CPU's PG data, and
 * references a "bootstrap" structure across this function's invocation.
 * pg_cmt_cpu_fini() and the routines it calls must be careful to operate only
 * on the "pgdata" argument, and not cp->cpu_pg.
 */
static void
pg_cmt_cpu_fini(cpu_t *cp, cpu_pg_t *pgdata)
{
	group_iter_t	i;
	pg_cmt_t	*pg;
	group_t		*pgs, *cmt_pgs;
	lgrp_handle_t	lgrp_handle;
	cmt_lgrp_t	*lgrp;

	if (cmt_sched_disabled)
		return;

	ASSERT(pg_cpu_is_bootstrapped(cp));

	pgs = &pgdata->pgs;
	cmt_pgs = &pgdata->cmt_pgs;

	/*
	 * Find the lgroup that encapsulates this CPU's CMT hierarchy
	 */
	lgrp_handle = lgrp_plat_cpu_to_hand(cp->cpu_id);

	lgrp = pg_cmt_find_lgrp(lgrp_handle);
	if (ncpus == 1 && lgrp != cpu0_lgrp) {
		/*
		 * One might wonder how we could be deconfiguring the
		 * only CPU in the system.
		 *
		 * On Starcat systems when null_proc_lpa is detected,
		 * the boot CPU (which is already configured into a leaf
		 * lgroup) is moved into the root lgroup. This is done by
		 * deconfiguring it from both lgroups and processor
		 * groups, and then later reconfiguring it back in.  This
		 * call to pg_cmt_cpu_fini() is part of that deconfiguration.
		 *
		 * This special case is detected by noting that the platform
		 * has changed the CPU's lgrp affiliation (since it now
		 * belongs in the root). In this case, use the cmt_lgrp_t
		 * cached for the boot CPU, since this is what needs to be
		 * torn down.
		 */
		lgrp = cpu0_lgrp;
	}

	ASSERT(lgrp != NULL);

	/*
	 * First, clean up anything load balancing specific for each of
	 * the CPU's PGs that participated in CMT load balancing
	 */
	pg = (pg_cmt_t *)pgdata->cmt_lineage;
	while (pg != NULL) {

		/*
		 * Remove the PG from the CPU's load balancing lineage
		 */
		(void) group_remove(cmt_pgs, pg, GRP_RESIZE);

		/*
		 * If it's about to become empty, destroy its children
		 * group, and remove its reference from its siblings.
		 * This is done here (rather than below) to avoid removing
		 * our reference from a PG that we just eliminated.
		 */
		if (GROUP_SIZE(&((pg_t *)pg)->pg_cpus) == 1) {
			if (pg->cmt_children != NULL)
				group_destroy(pg->cmt_children);
			if (pg->cmt_siblings != NULL) {
				if (pg->cmt_siblings == &lgrp->cl_pgs)
					lgrp->cl_npgs--;
				else
					pg->cmt_parent->cmt_nchildren--;
			}
		}
		pg = pg->cmt_parent;
	}
	ASSERT(GROUP_SIZE(cmt_pgs) == 0);

	/*
	 * Now that the load balancing lineage updates have happened,
	 * remove the CPU from all its PGs (destroying any that become
	 * empty).
	 */
	group_iter_init(&i);
	while ((pg = group_iterate(pgs, &i)) != NULL) {
		if (IS_CMT_PG(pg) == 0)
			continue;

		pg_cpu_delete((pg_t *)pg, cp, pgdata);
		/*
		 * Deleting the CPU from the PG changes the CPU's
		 * PG group over which we are actively iterating.
		 * Re-initialize the iteration.
		 */
		group_iter_init(&i);

		if (GROUP_SIZE(&((pg_t *)pg)->pg_cpus) == 0) {

			/*
			 * The PG has become zero sized, so destroy it.
			 */
			group_destroy(&pg->cmt_cpus_actv);
			bitset_fini(&pg->cmt_cpus_actv_set);
			pghw_fini((pghw_t *)pg);

			pg_destroy((pg_t *)pg);
		}
	}
}
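The group_iter_init() inside the second loop is the load-bearing detail: pg_cpu_delete() mutates the very group being iterated, so the scan is restarted rather than resumed. A generic sketch of the same restart-on-mutation pattern over a plain array (hypothetical types, not the illumos group API); restarting is quadratic in the worst case, but it is the simplest way to stay correct when deletion can reshuffle the backing array:

#include <stdio.h>
#include <stddef.h>

/* Hypothetical iterator over an array-backed group */
typedef struct {
	size_t	next;		/* index of the next element to hand out */
} iter_t;

typedef struct {
	int	elems[8];
	size_t	size;
} grp_t;

static void iter_init(iter_t *it) { it->next = 0; }

static int *
iterate(grp_t *g, iter_t *it)
{
	if (it->next >= g->size)
		return NULL;
	return &g->elems[it->next++];
}

/* Remove elems[i], shifting the tail down: invalidates live iterators */
static void
remove_at(grp_t *g, size_t i)
{
	for (; i + 1 < g->size; i++)
		g->elems[i] = g->elems[i + 1];
	g->size--;
}

int
main(void)
{
	grp_t g = { { 1, 2, 3, 4, 5 }, 5 };
	iter_t it;
	int *e;

	/* Delete every even element; restart the scan after each mutation */
	iter_init(&it);
	while ((e = iterate(&g, &it)) != NULL) {
		if (*e % 2 == 0) {
			remove_at(&g, (size_t)(e - g.elems));
			iter_init(&it);	/* same trick as pg_cmt_cpu_fini() */
		}
	}
	for (size_t i = 0; i < g.size; i++)
		printf("%d ", g.elems[i]);	/* prints: 1 3 5 */
	printf("\n");
	return 0;
}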
Example #5
/*
 * Promote PG above its current parent.
 * This is only legal if PG has an equal or greater number of CPUs than its
 * parent.
 *
 * This routine operates on the CPU specific processor group data (for the CPUs
 * in the PG being promoted), and may be invoked from a context where one CPU's
 * PG data is under construction. In this case the argument "pgdata", if not
 * NULL, is a reference to the CPU's under-construction PG data.
 */
static void
cmt_hier_promote(pg_cmt_t *pg, cpu_pg_t *pgdata)
{
	pg_cmt_t	*parent;
	group_t		*children;
	cpu_t		*cpu;
	group_iter_t	iter;
	pg_cpu_itr_t	cpu_iter;
	int		r;
	int		err;

	ASSERT(MUTEX_HELD(&cpu_lock));

	parent = pg->cmt_parent;
	if (parent == NULL) {
		/*
		 * Nothing to do
		 */
		return;
	}

	ASSERT(PG_NUM_CPUS((pg_t *)pg) >= PG_NUM_CPUS((pg_t *)parent));

	/*
	 * We're changing around the hierarchy, which is actively traversed
	 * by the dispatcher. Pause CPUs to ensure exclusivity.
	 */
	pause_cpus(NULL);

	/*
	 * If necessary, update the parent's sibling set, replacing parent
	 * with PG.
	 */
	if (parent->cmt_siblings) {
		if (group_remove(parent->cmt_siblings, parent, GRP_NORESIZE)
		    != -1) {
			r = group_add(parent->cmt_siblings, pg, GRP_NORESIZE);
			ASSERT(r != -1);
		}
	}

	/*
	 * If the parent is at the top of the hierarchy, replace its entry
	 * in the root lgroup's group of top level PGs.
	 */
	if (parent->cmt_parent == NULL &&
	    parent->cmt_siblings != &cmt_root->cl_pgs) {
		if (group_remove(&cmt_root->cl_pgs, parent, GRP_NORESIZE)
		    != -1) {
			r = group_add(&cmt_root->cl_pgs, pg, GRP_NORESIZE);
			ASSERT(r != -1);
		}
	}

	/*
	 * We assume (and therefore assert) that the PG being promoted is an
	 * only child of its parent. Update the parent's children set,
	 * replacing PG's entry with the parent (since the parent is becoming
	 * the child). Then have PG and the parent swap children sets.
	 */
	ASSERT(GROUP_SIZE(parent->cmt_children) <= 1);
	if (group_remove(parent->cmt_children, pg, GRP_NORESIZE) != -1) {
		r = group_add(parent->cmt_children, parent, GRP_NORESIZE);
		ASSERT(r != -1);
	}

	children = pg->cmt_children;
	pg->cmt_children = parent->cmt_children;
	parent->cmt_children = children;

	/*
	 * Update the sibling references for PG and its parent
	 */
	pg->cmt_siblings = parent->cmt_siblings;
	parent->cmt_siblings = pg->cmt_children;

	/*
	 * Update any cached lineages in the per CPU pg data.
	 */
	PG_CPU_ITR_INIT(pg, cpu_iter);
	while ((cpu = pg_cpu_next(&cpu_iter)) != NULL) {
		int		idx;
		pg_cmt_t	*cpu_pg;
		cpu_pg_t	*pgd;	/* CPU's PG data */

		/*
		 * The CPU whose lineage is under construction still
		 * references the bootstrap CPU PG data structure.
		 */
		if (pg_cpu_is_bootstrapped(cpu))
			pgd = pgdata;
		else
			pgd = cpu->cpu_pg;

		/*
		 * Iterate over the CPU's PGs updating the children
		 * of the PG being promoted, since they have a new parent.
		 */
		group_iter_init(&iter);
		while ((cpu_pg = group_iterate(&pgd->cmt_pgs, &iter)) != NULL) {
			if (cpu_pg->cmt_parent == pg) {
				cpu_pg->cmt_parent = parent;
			}
		}

		/*
		 * Update the CMT load balancing lineage
		 */
		if ((idx = group_find(&pgd->cmt_pgs, (void *)pg)) == -1) {
			/*
			 * Unless this is the CPU whose lineage is being
			 * constructed, the PG being promoted should be
			 * in the lineage.
			 */
			ASSERT(pg_cpu_is_bootstrapped(cpu));
			continue;
		}

		ASSERT(idx > 0);
		ASSERT(GROUP_ACCESS(&pgd->cmt_pgs, idx - 1) == parent);

		/*
		 * Have the child and the parent swap places in the CPU's
		 * lineage
		 */
		group_remove_at(&pgd->cmt_pgs, idx);
		group_remove_at(&pgd->cmt_pgs, idx - 1);
		err = group_add_at(&pgd->cmt_pgs, parent, idx);
		ASSERT(err == 0);
		err = group_add_at(&pgd->cmt_pgs, pg, idx - 1);
		ASSERT(err == 0);
	}

	/*
	 * Update the parent references for PG and its parent
	 */
	pg->cmt_parent = parent->cmt_parent;
	parent->cmt_parent = pg;

	start_cpus();
}
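The four calls at the end of the CPU loop are an adjacent-slot swap, and the ordering matters under the apparent API contract: both slots must be emptied before either group_add_at() can succeed, since adding into an occupied slot would fail. A hypothetical sketch, assuming (as this call sequence suggests) that group_remove_at()/group_add_at() clear and fill fixed slots rather than shifting elements:

#include <assert.h>
#include <stddef.h>

/* Hypothetical slot-based group; NULL means an empty slot */
typedef struct {
	void	**set;
	size_t	capacity;
	size_t	size;
} grp_t;

static void
grp_remove_at(grp_t *g, size_t idx)
{
	assert(idx < g->capacity && g->set[idx] != NULL);
	g->set[idx] = NULL;
	g->size--;
}

static int
grp_add_at(grp_t *g, void *e, size_t idx)
{
	if (idx >= g->capacity || g->set[idx] != NULL)
		return -1;	/* slot unavailable */
	g->set[idx] = e;
	g->size++;
	return 0;
}

int
main(void)
{
	int parent, child;
	void *slots[4] = { NULL, &parent, &child, NULL };
	grp_t g = { slots, 4, 2 };
	size_t idx = 2;		/* child's slot; parent sits at idx - 1 */

	/* Same dance as cmt_hier_promote(): empty both slots, then swap */
	grp_remove_at(&g, idx);
	grp_remove_at(&g, idx - 1);
	assert(grp_add_at(&g, &parent, idx) == 0);
	assert(grp_add_at(&g, &child, idx - 1) == 0);

	assert(g.set[idx - 1] == &child && g.set[idx] == &parent);
	return 0;
}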
Example #6
/*
 * Class callback when a CPU goes inactive (offline)
 *
 * This is called in a context where CPUs are paused
 */
static void
pg_cmt_cpu_inactive(cpu_t *cp)
{
	int		err;
	group_t		*pgs;
	pg_cmt_t	*pg;
	cpu_t		*cpp;
	group_iter_t	i;
	pg_cpu_itr_t	cpu_itr;
	boolean_t	found;

	ASSERT(MUTEX_HELD(&cpu_lock));

	if (cmt_sched_disabled)
		return;

	pgs = &cp->cpu_pg->pgs;
	group_iter_init(&i);

	while ((pg = group_iterate(pgs, &i)) != NULL) {

		if (IS_CMT_PG(pg) == 0)
			continue;

		/*
		 * Remove the CPU from the CMT PG's active CPU group
		 * and bitmap
		 */
		err = group_remove(&pg->cmt_cpus_actv, cp, GRP_NORESIZE);
		ASSERT(err == 0);

		bitset_del(&pg->cmt_cpus_actv_set, cp->cpu_seqid);

		/*
		 * If there are no more active CPUs in this PG over which
		 * load was balanced, remove it as a balancing candidate.
		 */
		if (GROUP_SIZE(&pg->cmt_cpus_actv) == 0 &&
		    (pg->cmt_policy & (CMT_BALANCE | CMT_COALESCE))) {
			err = group_remove(pg->cmt_siblings, pg, GRP_NORESIZE);
			ASSERT(err == 0);

			if (pg->cmt_parent == NULL &&
			    pg->cmt_siblings != &cmt_root->cl_pgs) {
				err = group_remove(&cmt_root->cl_pgs, pg,
				    GRP_NORESIZE);
				ASSERT(err == 0);
			}
		}

		/*
		 * Assert the number of active CPUs does not exceed
		 * the total number of CPUs in the PG
		 */
		ASSERT(GROUP_SIZE(&pg->cmt_cpus_actv) <=
		    GROUP_SIZE(&((pg_t *)pg)->pg_cpus));

		/*
		 * Update the PG bitset in the CPU's old partition
		 */
		found = B_FALSE;
		PG_CPU_ITR_INIT(pg, cpu_itr);
		while ((cpp = pg_cpu_next(&cpu_itr)) != NULL) {
			if (cpp == cp)
				continue;
			if (CPU_ACTIVE(cpp) &&
			    cpp->cpu_part->cp_id == cp->cpu_part->cp_id) {
				found = B_TRUE;
				break;
			}
		}
		if (!found) {
			bitset_del(&cp->cpu_part->cp_cmt_pgs,
			    ((pg_t *)pg)->pg_id);
		}
	}
}
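Every group_remove() in this callback passes GRP_NORESIZE, while Example #1 (ordinary context) passes GRP_RESIZE. A plausible reading, though the illumos headers are the authority here, is that the flag gates whether the call may touch the allocator: this callback runs with CPUs paused, where resizing the backing array would be unsafe. A sketch of a remove that honors such a policy flag:

#include <assert.h>
#include <stdlib.h>
#include <string.h>

#define	GRP_RESIZE	0x1	/* hypothetical: may shrink capacity */
#define	GRP_NORESIZE	0x2	/* hypothetical: allocation-free removal */

typedef struct {
	void	**set;
	size_t	capacity;
	size_t	size;
} grp_t;

/* Sketch: remove "e", optionally shrinking the backing array */
static int
grp_remove(grp_t *g, void *e, int policy)
{
	size_t i;

	for (i = 0; i < g->size; i++)
		if (g->set[i] == e)
			break;
	if (i == g->size)
		return -1;	/* not a member */

	memmove(&g->set[i], &g->set[i + 1],
	    (g->size - i - 1) * sizeof (void *));
	g->size--;

	if ((policy & GRP_RESIZE) && g->capacity >= 2 &&
	    g->size <= g->capacity / 2) {
		/*
		 * Only GRP_RESIZE may touch the allocator; GRP_NORESIZE
		 * keeps the operation allocation-free, which matters in
		 * contexts (such as paused CPUs) where blocking is not
		 * allowed.
		 */
		void **ns = realloc(g->set,
		    (g->capacity / 2) * sizeof (void *));
		if (ns != NULL) {
			g->set = ns;
			g->capacity /= 2;
		}
	}
	return 0;
}

int
main(void)
{
	int a, b, c, d;
	grp_t g = { malloc(4 * sizeof (void *)), 4, 4 };

	assert(g.set != NULL);
	g.set[0] = &a; g.set[1] = &b; g.set[2] = &c; g.set[3] = &d;
	grp_remove(&g, &b, GRP_NORESIZE);	/* capacity stays 4 */
	grp_remove(&g, &c, GRP_RESIZE);		/* may shrink to 2 */
	free(g.set);
	return 0;
}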
Example #7
static inline void group_release(FAR struct task_group_s *group)
{
  /* Free all un-reaped child exit status */

#if defined(CONFIG_SCHED_HAVE_PARENT) && defined(CONFIG_SCHED_CHILD_STATUS)
  group_removechildren(group);
#endif

#ifndef CONFIG_DISABLE_SIGNALS
  /* Release pending signals */

  sig_release(group);
#endif

#ifndef CONFIG_DISABLE_PTHREAD
  /* Release pthread resources */

  pthread_release(group);
#endif

#if CONFIG_NFILE_DESCRIPTORS > 0
  /* Free all file-related resources now.  We really need to close files as
   * soon as possible while we still have a functioning task.
   */

  /* Free resources held by the file descriptor list */

  files_releaselist(&group->tg_filelist);

#if CONFIG_NFILE_STREAMS > 0
  /* Free resource held by the stream list */

  lib_stream_release(group);

#endif /* CONFIG_NFILE_STREAMS */
#endif /* CONFIG_NFILE_DESCRIPTORS */

#if CONFIG_NSOCKET_DESCRIPTORS > 0
  /* Free resource held by the socket list */

  net_releaselist(&group->tg_socketlist);
#endif /* CONFIG_NSOCKET_DESCRIPTORS */

#ifndef CONFIG_DISABLE_ENVIRON
  /* Release all shared environment variables */

  env_release(group);
#endif

#ifndef CONFIG_DISABLE_MQUEUE
  /* Close message queues opened by members of the group */

  mq_release(group);
#endif

#if defined(CONFIG_BUILD_KERNEL) && defined(CONFIG_MM_SHM)
  /* Release any resource held by shared memory virtual page allocator */

  (void)shm_group_release(group);
#endif

#ifdef CONFIG_ARCH_ADDRENV
  /* Destroy the group address environment */

  (void)up_addrenv_destroy(&group->tg_addrenv);

  /* Mark no address environment */

  g_gid_current = 0;
#endif

#if defined(HAVE_GROUP_MEMBERS) || defined(CONFIG_ARCH_ADDRENV)
  /* Remove the group from the list of groups */

  group_remove(group);
#endif

#ifdef HAVE_GROUP_MEMBERS
  /* Release the members array */

  if (group->tg_members)
    {
      sched_kfree(group->tg_members);
      group->tg_members = NULL;
    }
#endif

#if CONFIG_NFILE_STREAMS > 0 && defined(CONFIG_MM_KERNEL_HEAP)
  /* In a flat, single-heap build, the stream list is part of the
   * group structure and, hence, will be freed when the group structure
   * is freed.  Otherwise, it is separately allocated and must be
   * freed here.
   */

#  if defined(CONFIG_BUILD_PROTECTED)
  /* In the protected build, the task's stream list is always allocated
   * and freed from the single, global user allocator.
   */

  sched_ufree(group->tg_streamlist);

#  elif defined(CONFIG_BUILD_KERNEL)
  /* In the kernel build, the unprivileged process' stream list will be
   * allocated from within its per-process, private user heap.  But in that
   * case, there is no reason to do anything here:  that allocation resides
   * in the user heap, which will be completely freed when we destroy the
   * process' address environment.
   */

  if ((group->tg_flags & GROUP_FLAG_PRIVILEGED) != 0)
    {
      /* But kernel threads are different in this build configuration: Their
       * stream lists were allocated from the common, global kernel heap and
       * must be explicitly freed here.
       */

      sched_kfree(group->tg_streamlist);
    }

#  endif
#endif

#if defined(CONFIG_SCHED_WAITPID) && !defined(CONFIG_SCHED_HAVE_PARENT)
  /* If there are threads waiting for this group to be freed, then we cannot
   * yet free the memory resources.  Instead just mark the group deleted
   * and wait for those threads to complete their waits.
   */

  if (group->tg_nwaiters > 0)
    {
      group->tg_flags |= GROUP_FLAG_DELETED;
    }
  else
#endif
    {
      /* Release the group container itself */

      sched_kfree(group);
    }
}
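group_release() is static, so it is presumably reached through a group_leave()-style wrapper that frees the group only when its last member exits. A hypothetical caller sketch (invented names and fields; only the group_release() idea comes from the example above):

#include <stdio.h>
#include <stdlib.h>

/*
 * Hypothetical stand-in type; only the shape of the logic is the
 * point.  The real NuttX task_group_s is far larger.
 */
struct task_group_s {
	int	tg_nmembers;	/* current member count (hypothetical field) */
};

static void
group_release(struct task_group_s *group)
{
	/* ... release signals, files, sockets, etc. (as above), then: */
	printf("releasing group container\n");
	free(group);
}

/*
 * Sketch of the caller side: each exiting member calls this, and only
 * the last one out reaches group_release().
 */
static void
group_leave(struct task_group_s *group)
{
	if (--group->tg_nmembers > 0)
		return;			/* other members remain */
	group_release(group);
}

int
main(void)
{
	struct task_group_s *g = malloc(sizeof (*g));

	g->tg_nmembers = 2;
	group_leave(g);			/* group survives */
	group_leave(g);			/* last member: group is released */
	return 0;
}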