Example #1
/*
 * Initialize a wait queue: set up its dispatcher lock and mark the
 * queue empty (no threads, zero count) and blocked.
 */
void
waitq_init(waitq_t *wq)
{
	DISP_LOCK_INIT(&wq->wq_lock);
	wq->wq_first = NULL;
	wq->wq_count = 0;
	wq->wq_blocked = B_TRUE;
}
Example #2
/*
 * Allocate and initialize cpucap structure
 */
static cpucap_t *
cap_alloc(void)
{
	cpucap_t *cap = kmem_zalloc(sizeof (cpucap_t), KM_SLEEP);

	DISP_LOCK_INIT(&cap->cap_usagelock);
	waitq_init(&cap->cap_waitq);

	return (cap);
}
Example #3
/*
 * Initialize the default partition and kpreempt disp queue.
 */
void
cpupart_initialize_default(void)
{
	lgrp_id_t i;

	cp_list_head = &cp_default;
	cp_default.cp_next = &cp_default;
	cp_default.cp_prev = &cp_default;
	cp_default.cp_id = CP_DEFAULT;
	cp_default.cp_kp_queue.disp_maxrunpri = -1;
	cp_default.cp_kp_queue.disp_max_unbound_pri = -1;
	cp_default.cp_kp_queue.disp_cpu = NULL;
	cp_default.cp_gen = 0;
	cp_default.cp_loadavg.lg_cur = 0;
	cp_default.cp_loadavg.lg_len = 0;
	cp_default.cp_loadavg.lg_total = 0;
	for (i = 0; i < S_LOADAVG_SZ; i++) {
		cp_default.cp_loadavg.lg_loads[i] = 0;
	}
	DISP_LOCK_INIT(&cp_default.cp_kp_queue.disp_lock);
	cp_id_next = CP_DEFAULT + 1;
	cpupart_kstat_create(&cp_default);
	cp_numparts = 1;
	if (cp_max_numparts == 0)	/* allow for /etc/system tuning */
		cp_max_numparts = max_ncpus * 2 + 1;
	/*
	 * Allocate space for cp_default list of lgrploads
	 */
	cpupart_lpl_initialize(&cp_default);

	/*
	 * The initial lpl topology is created in a special lpl list
	 * lpl_bootstrap. It should be copied to cp_default.
	 * NOTE: lpl_topo_bootstrap() also updates CPU0 cpu_lpl pointer to point
	 *	 to the correct lpl in the cp_default.cp_lgrploads list.
	 */
	lpl_topo_bootstrap(cp_default.cp_lgrploads,
	    cp_default.cp_nlgrploads);
	cp_default.cp_attr = PSET_NOESCAPE;
	cp_numparts_nonempty = 1;
	/*
	 * Set t0's home
	 */
	t0.t_lpl = &cp_default.cp_lgrploads[LGRP_ROOTID];

	bitset_init(&cp_default.cp_cmt_pgs);
	bitset_init_fanout(&cp_default.cp_haltset, cp_haltset_fanout);

	bitset_resize(&cp_default.cp_haltset, max_ncpus);
}
Example #4
/*
 * Create a new partition.  On MP systems, this also allocates a
 * kpreempt disp queue for that partition.
 */
int
cpupart_create(psetid_t *psid)
{
	cpupart_t	*pp;

	ASSERT(pool_lock_held());

	pp = kmem_zalloc(sizeof (cpupart_t), KM_SLEEP);
	pp->cp_nlgrploads = lgrp_plat_max_lgrps();
	pp->cp_lgrploads = kmem_zalloc(sizeof (lpl_t) * pp->cp_nlgrploads,
	    KM_SLEEP);

	mutex_enter(&cpu_lock);
	if (cp_numparts == cp_max_numparts) {
		mutex_exit(&cpu_lock);
		kmem_free(pp->cp_lgrploads, sizeof (lpl_t) * pp->cp_nlgrploads);
		pp->cp_lgrploads = NULL;
		kmem_free(pp, sizeof (cpupart_t));
		return (ENOMEM);
	}
	cp_numparts++;
	/* find the next free partition ID */
	while (cpupart_find(CPTOPS(cp_id_next)) != NULL)
		cp_id_next++;
	pp->cp_id = cp_id_next++;
	pp->cp_ncpus = 0;
	pp->cp_cpulist = NULL;
	pp->cp_attr = 0;
	klgrpset_clear(pp->cp_lgrpset);
	pp->cp_kp_queue.disp_maxrunpri = -1;
	pp->cp_kp_queue.disp_max_unbound_pri = -1;
	pp->cp_kp_queue.disp_cpu = NULL;
	pp->cp_gen = 0;
	DISP_LOCK_INIT(&pp->cp_kp_queue.disp_lock);
	*psid = CPTOPS(pp->cp_id);
	disp_kp_alloc(&pp->cp_kp_queue, v.v_nglobpris);
	cpupart_kstat_create(pp);
	cpupart_lpl_initialize(pp);

	bitset_init(&pp->cp_cmt_pgs);

	/*
	 * Initialize and size the partition's bitset of halted CPUs.
	 */
	bitset_init_fanout(&pp->cp_haltset, cp_haltset_fanout);
	bitset_resize(&pp->cp_haltset, max_ncpus);

	/*
	 * Pause all CPUs while changing the partition list, to make sure
	 * the clock thread (which traverses the list without holding
	 * cpu_lock) isn't running.
	 */
	pause_cpus(NULL);
	pp->cp_next = cp_list_head;
	pp->cp_prev = cp_list_head->cp_prev;
	cp_list_head->cp_prev->cp_next = pp;
	cp_list_head->cp_prev = pp;
	start_cpus();
	mutex_exit(&cpu_lock);

	return (0);
}
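The partition list touched in Examples #3 and #4 is a circular doubly linked list: the default partition initially points to itself (cp_next and cp_prev both reference cp_default), and cpupart_create() splices each new partition in just before cp_list_head, i.e. at the tail of the ring. Below is a minimal standalone sketch of that same pattern, using a hypothetical node_t type rather than the real cpupart_t, and leaving out the pause_cpus()/cpu_lock protection the kernel needs.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical node type standing in for cpupart_t. */
typedef struct node {
	struct node	*next;
	struct node	*prev;
	int		id;
} node_t;

/* List head; like cp_default, it points to itself while it is alone. */
static node_t head = { &head, &head, 0 };

/*
 * Splice a new node in just before the head (i.e., at the tail of the
 * ring), mirroring the cp_list_head insertion in cpupart_create().
 */
static void
list_insert_tail(node_t *np)
{
	np->next = &head;
	np->prev = head.prev;
	head.prev->next = np;
	head.prev = np;
}

int
main(void)
{
	node_t a = { .id = 1 };
	node_t b = { .id = 2 };
	node_t *p;

	list_insert_tail(&a);
	list_insert_tail(&b);

	/* Walk the ring starting at the head: prints "0 1 2". */
	p = &head;
	do {
		printf("%d ", p->id);
		p = p->next;
	} while (p != &head);
	printf("\n");
	return (0);
}

Because the list is circular and the head always points to itself when empty, insertion and traversal never need NULL checks; the kernel code relies on the same property when the clock thread walks the partition ring.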