Example 1
id_t
pg_plat_hw_instance_id(cpu_t *cpu, pghw_type_t hw)
{
	int impl;

	impl = cpunodes[cpu->cpu_id].implementation;

	switch (hw) {
	case PGHW_IPIPE:
		if (IS_OLYMPUS_C(impl) || IS_JUPITER(impl)) {
			/*
			 * Currently only Fujitsu Olympus-C (SPARC64-VI) and
			 * Jupiter (SPARC64-VII) processors support
			 * multi-stranded cores. Return the cpu_id with the
			 * strand bit masked out.
			 */
			return ((id_t)((uint_t)cpu->cpu_id & ~(0x1)));
		} else {
			return (cpu->cpu_id);
		}
	case PGHW_CHIP:
		return (cmp_cpu_to_chip(cpu->cpu_id));
	case PGHW_CACHE:
		if (IS_PANTHER(impl) ||
		    IS_OLYMPUS_C(impl) || IS_JUPITER(impl))
			return (pg_plat_hw_instance_id(cpu, PGHW_CHIP));
		else
			return (cpu->cpu_id);
	default:
		return (-1);
	}
}
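
A minimal userland sketch of the masking trick above (hypothetical: toy_ipipe_id is an illustrative helper, not part of the kernel API). On a multi-stranded Olympus-C/Jupiter core, both strands map to the same instruction-pipeline instance id once the strand bit is cleared:

#include <stdio.h>

/* Stand-in for the PGHW_IPIPE case above: mask out strand bit 0. */
static unsigned int
toy_ipipe_id(unsigned int cpu_id)
{
	return (cpu_id & ~(0x1u));
}

int
main(void)
{
	printf("cpu 6 -> %u\n", toy_ipipe_id(6));	/* 6 */
	printf("cpu 7 -> %u\n", toy_ipipe_id(7));	/* 6: same core as 6 */
	printf("cpu 8 -> %u\n", toy_ipipe_id(8));	/* 8: next core */
	return (0);
}
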
Example 2
/*
 * Initialize the physical portion of a hardware PG
 */
void
pghw_init(pghw_t *pg, cpu_t *cp, pghw_type_t hw)
{
	group_t		*hwset;

	if ((hwset = pghw_set_lookup(hw)) == NULL) {
		/*
		 * Haven't seen this hardware type yet
		 */
		hwset = pghw_set_create(hw);
	}

	pghw_set_add(hwset, pg);
	pg->pghw_hw = hw;
	pg->pghw_instance =
	    pg_plat_hw_instance_id(cp, hw);
	pghw_kstat_create(pg);

	/*
	 * Hardware sharing relationship specific initialization
	 */
	switch (pg->pghw_hw) {
	case PGHW_POW_ACTIVE:
		pg->pghw_handle =
		    (pghw_handle_t)cpupm_domain_init(cp, CPUPM_DTYPE_ACTIVE);
		break;
	case PGHW_POW_IDLE:
		pg->pghw_handle =
		    (pghw_handle_t)cpupm_domain_init(cp, CPUPM_DTYPE_IDLE);
		break;
	default:
		pg->pghw_handle = (pghw_handle_t)NULL;
	}
}
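
pghw_init() uses a lookup-or-create idiom: the per-hardware-type set is created lazily the first time a PG of that type is seen. A hedged userland sketch of the same idiom (toy_set_t and set_get are assumptions for illustration, not the kernel pghw_set_* API):

#include <stdio.h>
#include <stdlib.h>

#define	NTYPES	8

/* Toy "set": just counts the PGs of a given hardware type. */
typedef struct {
	int	type;
	int	npgs;
} toy_set_t;

static toy_set_t *sets[NTYPES];	/* created lazily, one per hw type */

/* Lookup-or-create, mirroring pghw_set_lookup()/pghw_set_create() above. */
static toy_set_t *
set_get(int type)
{
	if (sets[type] == NULL) {	/* haven't seen this type yet */
		sets[type] = calloc(1, sizeof (toy_set_t));
		sets[type]->type = type;
	}
	return (sets[type]);
}

int
main(void)
{
	set_get(3)->npgs++;
	set_get(3)->npgs++;
	printf("type 3 has %d PGs\n", set_get(3)->npgs);	/* prints 2 */
	return (0);
}
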
Example 3
int
pg_plat_cpus_share(cpu_t *cpu_a, cpu_t *cpu_b, pghw_type_t hw)
{
	int impl;

	impl = cpunodes[cpu_a->cpu_id].implementation;

	switch (hw) {
	case PGHW_IPIPE:
	case PGHW_CHIP:
		return (pg_plat_hw_instance_id(cpu_a, hw) ==
		    pg_plat_hw_instance_id(cpu_b, hw));
	case PGHW_CACHE:
		if ((IS_PANTHER(impl) || IS_OLYMPUS_C(impl) ||
		    IS_JUPITER(impl)) && pg_plat_cpus_share(cpu_a,
		    cpu_b, PGHW_CHIP)) {
			return (1);
		} else {
			return (0);
		}
	}
	return (0);
}
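
For PGHW_IPIPE, the test above reduces to comparing instance ids, which on Olympus-C/Jupiter means comparing cpu ids with the strand bit masked out. A hypothetical sketch modeling just that branch (share_ipipe is an illustrative helper, not a kernel function):

#include <stdio.h>

/*
 * Two CPUs share an instruction pipeline iff their ids match once the
 * strand bit is cleared (multi-stranded implementations only).
 */
static int
share_ipipe(unsigned int a, unsigned int b)
{
	return ((a & ~0x1u) == (b & ~0x1u));
}

int
main(void)
{
	printf("%d\n", share_ipipe(6, 7));	/* 1: strands of one core */
	printf("%d\n", share_ipipe(7, 8));	/* 0: different cores */
	return (0);
}
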
Example 4
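/*
 * On this platform the core id is simply the PGHW_IPIPE instance id:
 * CPUs that share an instruction pipeline belong to the same core.
 */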
id_t
pg_plat_get_core_id(cpu_t *cp)
{
	return (pg_plat_hw_instance_id(cp, PGHW_IPIPE));
}
Example 5
/*
 * CMT class callback for a new CPU entering the system
 *
 * This routine operates on the CPU specific processor group data (for the CPU
 * being initialized). The argument "pgdata" is a reference to the CPU's PG
 * data to be constructed.
 *
 * cp->cpu_pg, which the dispatcher uses to access the CPU's PG data,
 * references a "bootstrap" structure for the duration of this routine.
 * pg_cmt_cpu_init() and the routines it calls must therefore be careful to
 * operate only on the "pgdata" argument, and not on cp->cpu_pg.
 */
static void
pg_cmt_cpu_init(cpu_t *cp, cpu_pg_t *pgdata)
{
	pg_cmt_t	*pg;
	group_t		*cmt_pgs;
	int		levels, level;
	pghw_type_t	hw;
	pg_t		*pg_cache = NULL;
	pg_cmt_t	*cpu_cmt_hier[PGHW_NUM_COMPONENTS];
	lgrp_handle_t	lgrp_handle;
	cmt_lgrp_t	*lgrp;
	cmt_lineage_validation_t	lineage_status;

	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(pg_cpu_is_bootstrapped(cp));

	if (cmt_sched_disabled)
		return;

	/*
	 * A new CPU is coming into the system.
	 * Interrogate the platform to see if the CPU
	 * has any performance- or efficiency-relevant
	 * sharing relationships.
	 */
	cmt_pgs = &pgdata->cmt_pgs;
	pgdata->cmt_lineage = NULL;

	bzero(cpu_cmt_hier, sizeof (cpu_cmt_hier));
	levels = 0;
	for (hw = PGHW_START; hw < PGHW_NUM_COMPONENTS; hw++) {

		pg_cmt_policy_t	policy;

		/*
		 * We're only interested in the hw sharing relationships
		 * for which we know how to optimize.
		 */
		policy = pg_cmt_policy(hw);
		if (policy == CMT_NO_POLICY ||
		    pg_plat_hw_shared(cp, hw) == 0)
			continue;

		/*
		 * We will still create the PGs for hardware sharing
		 * relationships that have been blacklisted, but won't
		 * implement CMT thread placement optimizations against them.
		 */
		if (cmt_hw_blacklisted[hw] == 1)
			policy = CMT_NO_POLICY;

		/*
		 * Find (or create) the PG associated with
		 * the hw sharing relationship in which cp
		 * belongs.
		 *
		 * Determine if a suitable PG already
		 * exists, or if one needs to be created.
		 */
		pg = (pg_cmt_t *)pghw_place_cpu(cp, hw);
		if (pg == NULL) {
			/*
			 * Create a new one.
			 * Initialize the common...
			 */
			pg = (pg_cmt_t *)pg_create(pg_cmt_class_id);

			/* ... physical ... */
			pghw_init((pghw_t *)pg, cp, hw);

			/*
			 * ... and CMT specific portions of the
			 * structure.
			 */
			pg->cmt_policy = policy;

			/* CMT event callbacks */
			cmt_callback_init((pg_t *)pg);

			bitset_init(&pg->cmt_cpus_actv_set);
			group_create(&pg->cmt_cpus_actv);
		} else {
			ASSERT(IS_CMT_PG(pg));
		}

		/* Add the CPU to the PG */
		pg_cpu_add((pg_t *)pg, cp, pgdata);

		/*
		 * Ensure capacity of the active CPU group/bitset
		 */
		group_expand(&pg->cmt_cpus_actv,
		    GROUP_SIZE(&((pg_t *)pg)->pg_cpus));

		if (cp->cpu_seqid >=
		    bitset_capacity(&pg->cmt_cpus_actv_set)) {
			bitset_resize(&pg->cmt_cpus_actv_set,
			    cp->cpu_seqid + 1);
		}

		/*
		 * Build a lineage of CMT PGs for load balancing / coalescence
		 */
		if (policy & (CMT_BALANCE | CMT_COALESCE)) {
			cpu_cmt_hier[levels++] = pg;
		}

		/* Cache this for later */
		if (hw == PGHW_CACHE)
			pg_cache = (pg_t *)pg;
	}

	group_expand(cmt_pgs, levels);

	if (cmt_root == NULL)
		cmt_root = pg_cmt_lgrp_create(lgrp_plat_root_hand());

	/*
	 * Find the lgrp that encapsulates this CPU's CMT hierarchy
	 */
	lgrp_handle = lgrp_plat_cpu_to_hand(cp->cpu_id);
	if ((lgrp = pg_cmt_find_lgrp(lgrp_handle)) == NULL)
		lgrp = pg_cmt_lgrp_create(lgrp_handle);

	/*
	 * Sort the PGs in the lineage by number of CPUs, in ascending order
	 */
	pg_cmt_hier_sort(cpu_cmt_hier, levels);

	/*
	 * Examine the lineage and validate it.
	 * This routine will also try to fix the lineage along with the
	 * rest of the PG hierarchy should it detect an issue.
	 *
	 * If it returns anything other than VALID or REPAIRED, an
	 * unrecoverable error has occurred, and we cannot proceed.
	 */
	lineage_status = pg_cmt_lineage_validate(cpu_cmt_hier, &levels, pgdata);
	if ((lineage_status != CMT_LINEAGE_VALID) &&
	    (lineage_status != CMT_LINEAGE_REPAIRED)) {
		/*
		 * In the case of an unrecoverable error where CMT scheduling
		 * has been disabled, assert that the under-construction CPU's
		 * PG data has an empty CMT load balancing lineage.
		 */
		ASSERT((cmt_sched_disabled == 0) ||
		    (GROUP_SIZE(&(pgdata->cmt_pgs)) == 0));
		return;
	}

	/*
	 * For existing PGs in the lineage, verify that the parent is
	 * correct, as the generation in the lineage may have changed
	 * as a result of the sorting. Start the traversal at the top
	 * of the lineage, moving down.
	 */
	for (level = levels - 1; level >= 0; ) {
		int reorg;

		reorg = 0;
		pg = cpu_cmt_hier[level];

		/*
		 * Promote PGs at an incorrect generation into place.
		 */
		while (pg->cmt_parent &&
		    pg->cmt_parent != cpu_cmt_hier[level + 1]) {
			cmt_hier_promote(pg, pgdata);
			reorg++;
		}
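		/*
		 * If any promotions were done, the ordering above this
		 * level may have changed; restart the walk from the top
		 * of the lineage.
		 */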
		if (reorg > 0)
			level = levels - 1;
		else
			level--;
	}

	/*
	 * For each of the PGs in the CPU's lineage:
	 *	- Add an entry in the CPU sorted CMT PG group
	 *	  which is used for top down CMT load balancing
	 *	- Tie the PG into the CMT hierarchy by connecting
	 *	  it to its parent and siblings.
	 */
	for (level = 0; level < levels; level++) {
		uint_t		children;
		int		err;

		pg = cpu_cmt_hier[level];
		err = group_add_at(cmt_pgs, pg, levels - level - 1);
		ASSERT(err == 0);

		if (level == 0)
			pgdata->cmt_lineage = (pg_t *)pg;

		if (pg->cmt_siblings != NULL) {
			/* Already initialized */
			ASSERT(pg->cmt_parent == NULL ||
			    pg->cmt_parent == cpu_cmt_hier[level + 1]);
			ASSERT(pg->cmt_siblings == &lgrp->cl_pgs ||
			    ((pg->cmt_parent != NULL) &&
			    pg->cmt_siblings == pg->cmt_parent->cmt_children));
			continue;
		}

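		/*
		 * Topmost PG in the lineage: it has no parent, and its
		 * siblings are the lgrp's top-level PGs.
		 */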
		if ((level + 1) == levels) {
			pg->cmt_parent = NULL;

			pg->cmt_siblings = &lgrp->cl_pgs;
			children = ++lgrp->cl_npgs;
			if (cmt_root != lgrp)
				cmt_root->cl_npgs++;
		} else {
			pg->cmt_parent = cpu_cmt_hier[level + 1];

			/*
			 * A good parent keeps track of their children.
			 * The parent's children group is also the PG's
			 * siblings.
			 */
			if (pg->cmt_parent->cmt_children == NULL) {
				pg->cmt_parent->cmt_children =
				    kmem_zalloc(sizeof (group_t), KM_SLEEP);
				group_create(pg->cmt_parent->cmt_children);
			}
			pg->cmt_siblings = pg->cmt_parent->cmt_children;
			children = ++pg->cmt_parent->cmt_nchildren;
		}

		group_expand(pg->cmt_siblings, children);
		group_expand(&cmt_root->cl_pgs, cmt_root->cl_npgs);
	}

	/*
	 * Cache the chip and core IDs in the cpu_t->cpu_physid structure
	 * for fast lookups later.
	 */
	if (cp->cpu_physid) {
		cp->cpu_physid->cpu_chipid =
		    pg_plat_hw_instance_id(cp, PGHW_CHIP);
		cp->cpu_physid->cpu_coreid = pg_plat_get_core_id(cp);

		/*
		 * If this cpu has a PG representing shared cache, then set
		 * cpu_cacheid to that PG's logical id
		 */
		if (pg_cache)
			cp->cpu_physid->cpu_cacheid = pg_cache->pg_id;
	}

	/* CPU0-only initialization */
	if (is_cpu0) {
		is_cpu0 = 0;
		cpu0_lgrp = lgrp;
	}
}
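
A recurring idiom in pg_cmt_cpu_init() is growing a structure to fit before using it: the group_expand() calls, and the bitset_capacity()/bitset_resize() check against cpu_seqid. A hedged userland sketch of that grow-to-fit pattern (toy_bitset_t and toy_bitset_ensure are assumptions for illustration, not the kernel bitset API):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct {
	unsigned char	*bits;
	size_t		cap;		/* capacity in bytes */
} toy_bitset_t;

/*
 * Grow the bitset so that bit `id` is addressable, mirroring the
 * capacity check done for cmt_cpus_actv_set above.
 */
static void
toy_bitset_ensure(toy_bitset_t *bs, size_t id)
{
	size_t need = (id / 8) + 1;

	if (need > bs->cap) {
		unsigned char *nb = realloc(bs->bits, need);

		if (nb == NULL)
			abort();
		memset(nb + bs->cap, 0, need - bs->cap);
		bs->bits = nb;
		bs->cap = need;
	}
}

int
main(void)
{
	toy_bitset_t bs = { NULL, 0 };

	toy_bitset_ensure(&bs, 42);		/* e.g. cpu_seqid 42 */
	bs.bits[42 / 8] |= (1 << (42 % 8));	/* mark the CPU active */
	printf("capacity: %zu bytes\n", bs.cap);	/* prints 6 */
	free(bs.bits);
	return (0);
}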