Example #1
/*
 * Class callback when a CPU is entering a CPU partition
 */
static void
pg_cmt_cpupart_in(cpu_t *cp, cpupart_t *pp)
{
	group_t		*pgs;
	pg_t		*pg;
	group_iter_t	i;

	ASSERT(MUTEX_HELD(&cpu_lock));

	if (cmt_sched_disabled)
		return;

	pgs = &cp->cpu_pg->pgs;

	/*
	 * Ensure that the new partition's PG bitset
	 * is large enough for all CMT PGs to which cp
	 * belongs
	 */
	group_iter_init(&i);
	while ((pg = group_iterate(pgs, &i)) != NULL) {
		if (IS_CMT_PG(pg) == 0)
			continue;

		if (bitset_capacity(&pp->cp_cmt_pgs) <= pg->pg_id)
			bitset_resize(&pp->cp_cmt_pgs, pg->pg_id + 1);
	}
}
Example #2
/**
 * \brief Increase the number of pre-allocated items in a group
 *
 * \warning The size of the array can only be increased. If the function
 *   is called with a smaller number of items, it does nothing and returns
 *   successfully.
 * \param[in,out] grp       Pointer to the group
 * \param[in]     items_cnt New number of items
 * \return On success returns 0. Otherwise returns a non-zero value and the
 *   content of the group is unchanged.
 */
static int
group_resize(struct pevents_group *grp, size_t items_cnt)
{
	if (grp->all_prealloc >= items_cnt) {
		// Do not decrease the size
		return 0;
	}

	// Re-alloc the array
	struct pevents_item **new_items;
	new_items = realloc(grp->all_ptr, items_cnt * sizeof(*(grp->all_ptr)));
	if (!new_items) {
		MSG_ERROR(msg_module, "Unable to reallocate memory (%s:%d)",
			__FILE__, __LINE__);
		return 1;
	}

	grp->all_ptr = new_items;

	// Re-alloc the bitset
	if (bitset_resize(grp->bitset, items_cnt)) {
		// Failed to resize the bitset
		return 1;
	}

	grp->all_prealloc = items_cnt;
	return 0;
}
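
Given the contract documented above (grow-only, with the group left usable at its old size when allocation fails), a caller would typically grow on demand before touching an item slot. The following is only a hedged sketch, not code from ipfixcol; the wrapper name group_ensure_slot and its index parameter are hypothetical.

/*
 * Hypothetical caller sketch (not part of ipfixcol): make sure slot "idx"
 * is backed before it is used, growing the group on demand.
 */
static int
group_ensure_slot(struct pevents_group *grp, size_t idx)
{
	/* group_resize() is a no-op when the group is already large enough */
	if (group_resize(grp, idx + 1) != 0) {
		/* Allocation failed; the group stays usable at its old size */
		return 1;
	}

	return 0;
}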
Example #3
/* Init context bitset for a radio device (hwarc/whci) */
void
uwba_init_ctxt_id(uwba_dev_t *uwba_dev)
{
	bitset_init(&uwba_dev->ctxt_bits); /* zeroes sizeof (bitset_t) bytes */
	bitset_resize(&uwba_dev->ctxt_bits, 256); /* allocate backing memory for 256 bits */
	bitset_add(&uwba_dev->ctxt_bits, 0); /* mark the boundary ids 0 and 255 as in use */
	bitset_add(&uwba_dev->ctxt_bits, 255);
}
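
The initializer above marks ids 0 and 255 as in use, which suggests that later allocations hand out the ids in between. Below is a hedged sketch of such an allocator, assuming the illumos bitset_in_set()/bitset_add() interfaces; the function name is hypothetical and not taken from the uwba driver.

/*
 * Hypothetical sketch (not from the uwba driver): return the lowest free
 * context id, skipping the ids that uwba_init_ctxt_id() marked in use.
 */
static int
uwba_alloc_ctxt_id_sketch(uwba_dev_t *uwba_dev)
{
	uint_t id;

	for (id = 1; id < 255; id++) {
		if (!bitset_in_set(&uwba_dev->ctxt_bits, id)) {
			bitset_add(&uwba_dev->ctxt_bits, id);
			return ((int)id);
		}
	}

	return (-1); /* no free context id */
}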
Example #4
/* Double the bitset's capacity once the number of stored bits has reached it */
static int check_and_resize_bitset(bitset* bset, int bits_stored) {
    if(bits_stored >= bset->total_bits) {
        int resize_status = bitset_resize(bset, bset->total_bits * 2);
        if(resize_status == BITSET_ALLOC_ERROR) {
            return HUFFMAN_ALLOC_ERROR;
        }
    }

    return HUFFMAN_SUCCESS;
} 
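
Example #4 shows the usual capacity-check-then-grow pattern around bitset_resize. For readers without the kernel or library headers used above, here is a minimal, self-contained user-space sketch of the same pattern; the tiny_bitset type and its functions are hypothetical and unrelated to any of the bitset implementations in these examples.

/*
 * Minimal stand-alone sketch of the capacity-check / doubling pattern.
 * The tiny_bitset type is hypothetical and not part of any library above.
 */
#include <stdlib.h>
#include <string.h>
#include <limits.h>

typedef struct {
	unsigned long	*words;
	size_t		nbits;
} tiny_bitset;

#define	BITS_PER_WORD	(sizeof (unsigned long) * CHAR_BIT)
#define	WORDS_FOR(n)	(((n) + BITS_PER_WORD - 1) / BITS_PER_WORD)

/* Grow (never shrink) the bitset to hold at least nbits bits; 0 on success */
static int
tiny_bitset_resize(tiny_bitset *bs, size_t nbits)
{
	size_t old_words, new_words;
	unsigned long *tmp;

	if (nbits <= bs->nbits)
		return (0);		/* grow-only, like the examples above */

	old_words = WORDS_FOR(bs->nbits);
	new_words = WORDS_FOR(nbits);
	tmp = realloc(bs->words, new_words * sizeof (*tmp));
	if (tmp == NULL)
		return (1);		/* old buffer is still valid */

	/* Zero only the newly added words */
	memset(tmp + old_words, 0, (new_words - old_words) * sizeof (*tmp));
	bs->words = tmp;
	bs->nbits = nbits;
	return (0);
}

/* Set bit idx, growing the capacity on demand as in Example #4 */
static int
tiny_bitset_set(tiny_bitset *bs, size_t idx)
{
	if (idx >= bs->nbits &&
	    tiny_bitset_resize(bs, (idx + 1) * 2) != 0)
		return (1);

	bs->words[idx / BITS_PER_WORD] |= 1UL << (idx % BITS_PER_WORD);
	return (0);
}

int
main(void)
{
	tiny_bitset bs = { NULL, 0 };

	/* Each call grows the set on demand before setting the bit */
	if (tiny_bitset_set(&bs, 5) != 0 || tiny_bitset_set(&bs, 130) != 0)
		return (1);

	free(bs.words);
	return (0);
}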
Example #5
/*
 * Initialize the default partition and kpreempt disp queue.
 */
void
cpupart_initialize_default(void)
{
	lgrp_id_t i;

	cp_list_head = &cp_default;
	cp_default.cp_next = &cp_default;
	cp_default.cp_prev = &cp_default;
	cp_default.cp_id = CP_DEFAULT;
	cp_default.cp_kp_queue.disp_maxrunpri = -1;
	cp_default.cp_kp_queue.disp_max_unbound_pri = -1;
	cp_default.cp_kp_queue.disp_cpu = NULL;
	cp_default.cp_gen = 0;
	cp_default.cp_loadavg.lg_cur = 0;
	cp_default.cp_loadavg.lg_len = 0;
	cp_default.cp_loadavg.lg_total = 0;
	for (i = 0; i < S_LOADAVG_SZ; i++) {
		cp_default.cp_loadavg.lg_loads[i] = 0;
	}
	DISP_LOCK_INIT(&cp_default.cp_kp_queue.disp_lock);
	cp_id_next = CP_DEFAULT + 1;
	cpupart_kstat_create(&cp_default);
	cp_numparts = 1;
	if (cp_max_numparts == 0)	/* allow for /etc/system tuning */
		cp_max_numparts = max_ncpus * 2 + 1;
	/*
	 * Allocate space for cp_default list of lgrploads
	 */
	cpupart_lpl_initialize(&cp_default);

	/*
	 * The initial lpl topology is created in a special lpl list
	 * lpl_bootstrap. It should be copied to cp_default.
	 * NOTE: lpl_topo_bootstrap() also updates CPU0 cpu_lpl pointer to point
	 *	 to the correct lpl in the cp_default.cp_lgrploads list.
	 */
	lpl_topo_bootstrap(cp_default.cp_lgrploads,
	    cp_default.cp_nlgrploads);


	cp_default.cp_attr = PSET_NOESCAPE;
	cp_numparts_nonempty = 1;
	/*
	 * Set t0's home
	 */
	t0.t_lpl = &cp_default.cp_lgrploads[LGRP_ROOTID];

	bitset_init(&cp_default.cp_cmt_pgs);
	bitset_init_fanout(&cp_default.cp_haltset, cp_haltset_fanout);

	bitset_resize(&cp_default.cp_haltset, max_ncpus);
}
Example #6
/* Copy bitset2 into bitset1, resizing bitset1 to match; no-op if either bitset is unallocated or the resize fails */
void bitset_copy(bitset_t *bitset1, bitset_t *bitset2) {

	if (!bitset1->bits_m) return;
	if (!bitset2->bits_m) return;

	if (bitset1->nbits_m != bitset2->nbits_m) {
		bitset_resize(bitset1, bitset2->nbits_m);
	}

	if (bitset1->nbits_m != bitset2->nbits_m) return;

	memcpy(bitset1->bits_m, bitset2->bits_m, sizeof(uint32_t)*bitset1->nwords_m);
	bitset1->nset_m = bitset2->nset_m;
}
Example #7
/*
 * Allocate backing resources
 *
 * DAMs are lightly backed on create - major allocations occur
 * at the time a report is made to the map, and are extended on
 * a demand basis.
 */
static int
dam_map_alloc(dam_t *mapp)
{
	void *softstate_p;

	ASSERT(mutex_owned(&mapp->dam_lock));
	if (mapp->dam_flags & DAM_DESTROYPEND)
		return (DAM_FAILURE);

	/*
	 * dam_high > 0 signals map allocation complete
	 */
	if (mapp->dam_high)
		return (DAM_SUCCESS);

	mapp->dam_size = DAM_SIZE_BUMP;
	if (ddi_soft_state_init(&softstate_p, sizeof (dam_da_t),
	    mapp->dam_size) != DDI_SUCCESS)
		return (DAM_FAILURE);

	if (ddi_strid_init(&mapp->dam_addr_hash, mapp->dam_size) !=
	    DDI_SUCCESS) {
		ddi_soft_state_fini(&softstate_p);
		return (DAM_FAILURE);
	}
	if (dam_kstat_create(mapp) != DDI_SUCCESS) {
		ddi_soft_state_fini(&softstate_p);
		ddi_strid_fini(&mapp->dam_addr_hash);
		return (DAM_FAILURE);
	}
	mapp->dam_da = softstate_p;
	mapp->dam_high = 1;
	bitset_resize(&mapp->dam_active_set, mapp->dam_size);
	bitset_resize(&mapp->dam_stable_set, mapp->dam_size);
	bitset_resize(&mapp->dam_report_set, mapp->dam_size);
	return (DAM_SUCCESS);
}
Example #8
/*
 * CMT class callback for a new CPU entering the system
 *
 * This routine operates on the CPU specific processor group data (for the CPU
 * being initialized). The argument "pgdata" is a reference to the CPU's PG
 * data to be constructed.
 *
 * While this routine runs, cp->cpu_pg (which the dispatcher uses to access
 * the CPU's PG data) still references a "bootstrap" structure.
 * pg_cmt_cpu_init() and the routines it calls must therefore be careful to
 * operate only on the "pgdata" argument, and not on cp->cpu_pg.
 */
static void
pg_cmt_cpu_init(cpu_t *cp, cpu_pg_t *pgdata)
{
	pg_cmt_t	*pg;
	group_t		*cmt_pgs;
	int		levels, level;
	pghw_type_t	hw;
	pg_t		*pg_cache = NULL;
	pg_cmt_t	*cpu_cmt_hier[PGHW_NUM_COMPONENTS];
	lgrp_handle_t	lgrp_handle;
	cmt_lgrp_t	*lgrp;
	cmt_lineage_validation_t	lineage_status;

	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(pg_cpu_is_bootstrapped(cp));

	if (cmt_sched_disabled)
		return;

	/*
	 * A new CPU is coming into the system.
	 * Interrogate the platform to see if the CPU
	 * has any performance or efficiency relevant
	 * sharing relationships
	 */
	cmt_pgs = &pgdata->cmt_pgs;
	pgdata->cmt_lineage = NULL;

	bzero(cpu_cmt_hier, sizeof (cpu_cmt_hier));
	levels = 0;
	for (hw = PGHW_START; hw < PGHW_NUM_COMPONENTS; hw++) {

		pg_cmt_policy_t	policy;

		/*
		 * We're only interested in the hw sharing relationships
		 * for which we know how to optimize.
		 */
		policy = pg_cmt_policy(hw);
		if (policy == CMT_NO_POLICY ||
		    pg_plat_hw_shared(cp, hw) == 0)
			continue;

		/*
		 * We will still create the PGs for hardware sharing
		 * relationships that have been blacklisted, but won't
		 * implement CMT thread placement optimizations against them.
		 */
		if (cmt_hw_blacklisted[hw] == 1)
			policy = CMT_NO_POLICY;

		/*
		 * Find (or create) the PG associated with
		 * the hw sharing relationship in which cp
		 * belongs.
		 *
		 * Determine if a suitable PG already
		 * exists, or if one needs to be created.
		 */
		pg = (pg_cmt_t *)pghw_place_cpu(cp, hw);
		if (pg == NULL) {
			/*
			 * Create a new one.
			 * Initialize the common...
			 */
			pg = (pg_cmt_t *)pg_create(pg_cmt_class_id);

			/* ... physical ... */
			pghw_init((pghw_t *)pg, cp, hw);

			/*
			 * ... and CMT specific portions of the
			 * structure.
			 */
			pg->cmt_policy = policy;

			/* CMT event callbacks */
			cmt_callback_init((pg_t *)pg);

			bitset_init(&pg->cmt_cpus_actv_set);
			group_create(&pg->cmt_cpus_actv);
		} else {
			ASSERT(IS_CMT_PG(pg));
		}

		/* Add the CPU to the PG */
		pg_cpu_add((pg_t *)pg, cp, pgdata);

		/*
		 * Ensure capacity of the active CPU group/bitset
		 */
		group_expand(&pg->cmt_cpus_actv,
		    GROUP_SIZE(&((pg_t *)pg)->pg_cpus));

		if (cp->cpu_seqid >=
		    bitset_capacity(&pg->cmt_cpus_actv_set)) {
			bitset_resize(&pg->cmt_cpus_actv_set,
			    cp->cpu_seqid + 1);
		}

		/*
		 * Build a lineage of CMT PGs for load balancing / coalescence
		 */
		if (policy & (CMT_BALANCE | CMT_COALESCE)) {
			cpu_cmt_hier[levels++] = pg;
		}

		/* Cache this for later */
		if (hw == PGHW_CACHE)
			pg_cache = (pg_t *)pg;
	}

	group_expand(cmt_pgs, levels);

	if (cmt_root == NULL)
		cmt_root = pg_cmt_lgrp_create(lgrp_plat_root_hand());

	/*
	 * Find the lgrp that encapsulates this CPU's CMT hierarchy
	 */
	lgrp_handle = lgrp_plat_cpu_to_hand(cp->cpu_id);
	if ((lgrp = pg_cmt_find_lgrp(lgrp_handle)) == NULL)
		lgrp = pg_cmt_lgrp_create(lgrp_handle);

	/*
	 * Sort the PGs in the lineage by number of CPUs, in ascending order
	 */
	pg_cmt_hier_sort(cpu_cmt_hier, levels);

	/*
	 * Examine the lineage and validate it.
	 * This routine will also try to fix the lineage along with the
	 * rest of the PG hierarchy should it detect an issue.
	 *
	 * If it returns anything other than VALID or REPAIRED, an
	 * unrecoverable error has occurred, and we cannot proceed.
	 */
	lineage_status = pg_cmt_lineage_validate(cpu_cmt_hier, &levels, pgdata);
	if ((lineage_status != CMT_LINEAGE_VALID) &&
	    (lineage_status != CMT_LINEAGE_REPAIRED)) {
		/*
		 * In the case of an unrecoverable error where CMT scheduling
		 * has been disabled, assert that the under construction CPU's
		 * PG data has an empty CMT load balancing lineage.
		 */
		ASSERT((cmt_sched_disabled == 0) ||
		    (GROUP_SIZE(&(pgdata->cmt_pgs)) == 0));
		return;
	}

	/*
	 * For existing PGs in the lineage, verify that the parent is
	 * correct, as the generation in the lineage may have changed
	 * as a result of the sorting. Start the traversal at the top
	 * of the lineage, moving down.
	 */
	for (level = levels - 1; level >= 0; ) {
		int reorg;

		reorg = 0;
		pg = cpu_cmt_hier[level];

		/*
		 * Promote PGs at an incorrect generation into place.
		 */
		while (pg->cmt_parent &&
		    pg->cmt_parent != cpu_cmt_hier[level + 1]) {
			cmt_hier_promote(pg, pgdata);
			reorg++;
		}
		if (reorg > 0)
			level = levels - 1;
		else
			level--;
	}

	/*
	 * For each of the PGs in the CPU's lineage:
	 *	- Add an entry in the CPU sorted CMT PG group
	 *	  which is used for top down CMT load balancing
	 *	- Tie the PG into the CMT hierarchy by connecting
	 *	  it to its parent and siblings.
	 */
	for (level = 0; level < levels; level++) {
		uint_t		children;
		int		err;

		pg = cpu_cmt_hier[level];
		err = group_add_at(cmt_pgs, pg, levels - level - 1);
		ASSERT(err == 0);

		if (level == 0)
			pgdata->cmt_lineage = (pg_t *)pg;

		if (pg->cmt_siblings != NULL) {
			/* Already initialized */
			ASSERT(pg->cmt_parent == NULL ||
			    pg->cmt_parent == cpu_cmt_hier[level + 1]);
			ASSERT(pg->cmt_siblings == &lgrp->cl_pgs ||
			    ((pg->cmt_parent != NULL) &&
			    pg->cmt_siblings == pg->cmt_parent->cmt_children));
			continue;
		}

		if ((level + 1) == levels) {
			pg->cmt_parent = NULL;

			pg->cmt_siblings = &lgrp->cl_pgs;
			children = ++lgrp->cl_npgs;
			if (cmt_root != lgrp)
				cmt_root->cl_npgs++;
		} else {
			pg->cmt_parent = cpu_cmt_hier[level + 1];

			/*
			 * A good parent keeps track of their children.
			 * The parent's children group is also the PG's
			 * siblings.
			 */
			if (pg->cmt_parent->cmt_children == NULL) {
				pg->cmt_parent->cmt_children =
				    kmem_zalloc(sizeof (group_t), KM_SLEEP);
				group_create(pg->cmt_parent->cmt_children);
			}
			pg->cmt_siblings = pg->cmt_parent->cmt_children;
			children = ++pg->cmt_parent->cmt_nchildren;
		}

		group_expand(pg->cmt_siblings, children);
		group_expand(&cmt_root->cl_pgs, cmt_root->cl_npgs);
	}

	/*
	 * Cache the chip and core IDs in the cpu_t->cpu_physid structure
	 * for fast lookups later.
	 */
	if (cp->cpu_physid) {
		cp->cpu_physid->cpu_chipid =
		    pg_plat_hw_instance_id(cp, PGHW_CHIP);
		cp->cpu_physid->cpu_coreid = pg_plat_get_core_id(cp);

		/*
		 * If this cpu has a PG representing shared cache, then set
		 * cpu_cacheid to that PG's logical id
		 */
		if (pg_cache)
			cp->cpu_physid->cpu_cacheid = pg_cache->pg_id;
	}

	/* CPU0 only initialization */
	if (is_cpu0) {
		is_cpu0 = 0;
		cpu0_lgrp = lgrp;
	}

}
Example #9
/*
 * Create a new partition.  On MP systems, this also allocates a
 * kpreempt disp queue for that partition.
 */
int
cpupart_create(psetid_t *psid)
{
	cpupart_t	*pp;

	ASSERT(pool_lock_held());

	pp = kmem_zalloc(sizeof (cpupart_t), KM_SLEEP);
	pp->cp_nlgrploads = lgrp_plat_max_lgrps();
	pp->cp_lgrploads = kmem_zalloc(sizeof (lpl_t) * pp->cp_nlgrploads,
	    KM_SLEEP);

	mutex_enter(&cpu_lock);
	if (cp_numparts == cp_max_numparts) {
		mutex_exit(&cpu_lock);
		kmem_free(pp->cp_lgrploads, sizeof (lpl_t) * pp->cp_nlgrploads);
		pp->cp_lgrploads = NULL;
		kmem_free(pp, sizeof (cpupart_t));
		return (ENOMEM);
	}
	cp_numparts++;
	/* find the next free partition ID */
	while (cpupart_find(CPTOPS(cp_id_next)) != NULL)
		cp_id_next++;
	pp->cp_id = cp_id_next++;
	pp->cp_ncpus = 0;
	pp->cp_cpulist = NULL;
	pp->cp_attr = 0;
	klgrpset_clear(pp->cp_lgrpset);
	pp->cp_kp_queue.disp_maxrunpri = -1;
	pp->cp_kp_queue.disp_max_unbound_pri = -1;
	pp->cp_kp_queue.disp_cpu = NULL;
	pp->cp_gen = 0;
	DISP_LOCK_INIT(&pp->cp_kp_queue.disp_lock);
	*psid = CPTOPS(pp->cp_id);
	disp_kp_alloc(&pp->cp_kp_queue, v.v_nglobpris);
	cpupart_kstat_create(pp);
	cpupart_lpl_initialize(pp);

	bitset_init(&pp->cp_cmt_pgs);

	/*
	 * Initialize and size the partition's bitset of halted CPUs.
	 */
	bitset_init_fanout(&pp->cp_haltset, cp_haltset_fanout);
	bitset_resize(&pp->cp_haltset, max_ncpus);

	/*
	 * Pause all CPUs while changing the partition list, to make sure
	 * the clock thread (which traverses the list without holding
	 * cpu_lock) isn't running.
	 */
	pause_cpus(NULL);
	pp->cp_next = cp_list_head;
	pp->cp_prev = cp_list_head->cp_prev;
	cp_list_head->cp_prev->cp_next = pp;
	cp_list_head->cp_prev = pp;
	start_cpus();
	mutex_exit(&cpu_lock);

	return (0);
}