Example #1
/* Create a vector of N_VECS bitsets, each of N_BITS, and of
   type TYPE.  */
bitset *
bitsetv_alloc (bitset_bindex n_vecs, bitset_bindex n_bits,
	       enum bitset_type type)
{
  size_t vector_bytes;
  size_t bytes;
  bitset *bsetv;
  bitset_bindex i;

  /* Determine number of bytes for each set.  */
  bytes = bitset_bytes (type, n_bits);

  /* If size calculation overflows, memory is exhausted.  */
  if (BITSET_SIZE_MAX / (sizeof (bitset) + bytes) <= n_vecs)
    xalloc_die ();

  /* Allocate vector table at head of bitset array.  */
  vector_bytes = (n_vecs + 1) * sizeof (bitset) + bytes - 1;
  vector_bytes -= vector_bytes % bytes;
  bsetv = xcalloc (1, vector_bytes + bytes * n_vecs);

  for (i = 0; i < n_vecs; i++)
    {
      bsetv[i] = (bitset) (void *) ((char *) bsetv + vector_bytes + i * bytes);

      bitset_init (bsetv[i], n_bits, type);
    }

  /* Null terminate table.  */
  bsetv[i] = 0;
  return bsetv;
}
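
A minimal usage sketch for the allocator above, assuming the companion calls from the same bitset library (bitsetv_free, bitset_set) and its fixed-size type tag BITSET_FIXED:

/* Hedged usage sketch: four 128-bit sets, one bit marked in each,
   then the whole vector released.  Relies on the table being null
   terminated, as guaranteed above.  */
static void
bitsetv_demo (void)
{
  bitset *v = bitsetv_alloc (4, 128, BITSET_FIXED);
  for (bitset_bindex i = 0; v[i]; i++)
    bitset_set (v[i], i);
  bitsetv_free (v);
}
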
Example #2
int bitset_resize(bitset_t *bitset, size_t n) {
	Status_t status;
	size_t orig_nbits = bitset->nbits_m;
	size_t orig_nset = bitset->nset_m;
	size_t orig_nwords = bitset->nwords_m;
	uint32_t *orig_bits = bitset->bits_m;

	if (n == bitset->nbits_m) return 1;

	bitset->bits_m = NULL;

	if (!bitset_init(bitset->pool_m, bitset, n)) {
		bitset->nbits_m = orig_nbits;
		bitset->nset_m = orig_nset;
		bitset->nwords_m = orig_nwords;
		bitset->bits_m = orig_bits;
		return 0;
	}

	if (!orig_bits) return 1;

	if (n > orig_nbits) {
		memcpy(bitset->bits_m, orig_bits, orig_nwords*sizeof(uint32_t));
		bitset->nset_m = orig_nset;
	} else {
		memcpy(bitset->bits_m, orig_bits, bitset->nwords_m*sizeof(uint32_t));
		bitset->nset_m = count_nset(bitset);
	}

	if ((status = vs_pool_free(bitset->pool_m, orig_bits)) != VSTATUS_OK) {
		IB_LOG_ERRORRC("can't free allocated space for bitset, rc:", status);
	}
	return 1;
}
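
A hedged sketch of the resize semantics above: growing preserves the existing words, shrinking recounts the population. The bitset_set()/bitset_test() names and the pool handle are assumptions about this code base's API:

/* Sketch only: substitute this tree's actual setter and membership
   test.  Assumes <assert.h>.  */
static void bitset_resize_demo(Pool_t *pool)
{
	bitset_t bs;

	if (!bitset_init(pool, &bs, 64))
		return;
	bitset_set(&bs, 3);               /* assumed setter */
	if (bitset_resize(&bs, 256))      /* grow: old words are copied */
		assert(bitset_test(&bs, 3));  /* assumed membership test */
	bitset_free(&bs);
}
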
Example #3
int main(int argc, char **argv)
{
  bitset_size_t pos;
  bitset_t *bitset = bitset_init(NULL, DATA_SIZE);
  bitset_set(bitset, 0);
  bitset_set(bitset, 5);  
  bitset_set(bitset, 38);  
  bitset_set(bitset, 31);  
  bitset_set(bitset, 32);
  bitset_print(bitset);

  bitset_unset_all(bitset);
  bitset_print(bitset);

  bitset_set_all(bitset);
  bitset_print(bitset);

  /* pos = -1; */
  /* while ((pos = bitset_find_first_set_since(bitset, pos + 1)) */
  /* 	 != bitset_npos) */
  /*   printf("%d is set\n", pos); */

  /* pos = -1; */
  /* while ((pos = bitset_find_first_unset_since(bitset, pos + 1)) */
  /* 	 != bitset_npos) */
  /*   printf("%d is unset\n", pos); */

  bitset_delete(bitset);
  return 0;
}
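
For reference, a hedged version of the scan that the commented-out loops gesture at, starting from 0 to avoid the pos = -1 wraparound on an unsigned bitset_size_t (bitset_find_first_set_since and bitset_npos are the names the comments already use):

static void print_set_positions(bitset_t *bitset)
{
  bitset_size_t pos = bitset_find_first_set_since(bitset, 0);
  while (pos != bitset_npos) {
    printf("%u is set\n", (unsigned) pos);
    pos = bitset_find_first_set_since(bitset, pos + 1);
  }
}
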
Example #4
void networkReadCallbackPerByte(NetworkAddress networkAddr, ADDRINT start, size_t length, void *v)
{
    int tag;

    assert(taintGen);
    bitset *s = bitset_init(NUMBER_OF_TAINT_MARKS);

    ADDRINT end = start + length;
    for(ADDRINT addr = start; addr < end; addr++) {
        tag = taintGen->nextTaintMark();
        bitset_set_bit(s, tag);
        memTaintMap[addr] = bitset_copy(s);
        bitset_reset(s);
        // log each address with the mark just assigned to it
        taintAssignmentLog << tag << " - [" << networkAddr.strAddress << "] -> " << std::hex << addr << "\n";
    }
    bitset_free(s);
    taintAssignmentLog.flush();

#ifdef TRACE
    if(tracing) {
        log << "\t" << std::hex << start << "-" << std::hex << end - 1 << " <- read\n";
        log.flush();
    }
#endif
}
Example #5
void networkReadCallbackPerRead(NetworkAddress networkAddr, ADDRINT start, size_t length, void *v)
{
    int tag;
    bitset *s = bitset_init(NUMBER_OF_TAINT_MARKS);
    assert(taintGen);
    tag = taintGen->nextTaintMark();
    //taint entire buffer with 1 mark
    bitset_set_bit(s, tag);

    ADDRINT end = start + length;
    for(ADDRINT addr = start; addr < end; addr++) {
        memTaintMap[addr] = bitset_copy(s);
    }
    bitset_free(s);

    taintAssignmentLog << tag << " - [" << networkAddr.strAddress << "] -> " << std::hex << start << "-" << std::hex << end - 1 << "\n";
    taintAssignmentLog.flush();

#ifdef TRACE
    if(tracing) {
        log << "\t" << std::hex << start << "-" << std::hex << end - 1 << " <- read(" << tag << ")\n";
        log.flush();
    }
#endif
}
Example #6
/* Init context bitset for a radio device (hwarc/whci) */
void
uwba_init_ctxt_id(uwba_dev_t *uwba_dev)
{
	bitset_init(&uwba_dev->ctxt_bits); /* bzeros sizeof (bitset_t) bytes */
	bitset_resize(&uwba_dev->ctxt_bits, 256); /* allocates the bit array */
	bitset_add(&uwba_dev->ctxt_bits, 0); /* mark ids 0 and 255 as in use */
	bitset_add(&uwba_dev->ctxt_bits, 255);
}
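
A hedged sketch of how such a context-id set is typically consumed: scan for a clear bit, mark it, and hand out the id. bitset_in_set() and bitset_add() are the kernel bitset calls already used here; the function itself is hypothetical:

static uint16_t
uwba_alloc_ctxt_id_sketch(uwba_dev_t *uwba_dev)
{
	uint16_t id;

	for (id = 1; id < 255; id++) {	/* 0 and 255 were reserved above */
		if (!bitset_in_set(&uwba_dev->ctxt_bits, id)) {
			bitset_add(&uwba_dev->ctxt_bits, id);
			return (id);
		}
	}
	return (0);	/* no free id */
}
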
Example #7
/**
 * @brief Creates a copy of a bitset data structure.
 * @details Initializes `set` with the same size as `source` and copies all values of `source` to `set`.
 * @param set Pointer to an uninitialized bitset data structure.
 * @param source Pointer to an initialized bitset data structure that is to be copied.
 */
void bitset_init_copy(bitset_t *set, const bitset_t *source)
{
#ifdef BITSET_ASSERTIONS
    assert(set);
    assert(source);
#endif

    bitset_init(set, source->max);
    bitset_copy(set, source);
}
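
A hedged usage sketch; bitset_add(), bitset_contains() and bitset_free() are placeholder names for this library's setter, membership test and destructor (assumes <assert.h>):

static void bitset_init_copy_demo(void)
{
    bitset_t a, b;

    bitset_init(&a, 64);            /* values 0 .. 63 */
    bitset_add(&a, 7);              /* hypothetical setter */
    bitset_init_copy(&b, &a);       /* b gets a's size and contents */
    assert(bitset_contains(&b, 7)); /* hypothetical membership test */
    bitset_free(&a);
    bitset_free(&b);
}
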
Example #8
int bitset_alloc(bitset** bs, unsigned int size) {
    *bs = malloc(sizeof(struct bitset));
    if (*bs == NULL) return BITSET_ERROR;

    (*bs)->bs = malloc(BITNSIZE(size));
    if ((*bs)->bs == NULL) {
        free(*bs);
        return BITSET_ERROR;
    }
    return bitset_init(*bs, size);
}
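
A hedged usage sketch for the allocator above; bitset_free() is an assumed counterpart that releases both (*bs)->bs and *bs:

static int bitset_alloc_demo(void)
{
    bitset *bs;
    if (bitset_alloc(&bs, 1024) == BITSET_ERROR)
        return BITSET_ERROR;
    /* ... use bs ... */
    bitset_free(bs);
    return 0;
}
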
Example #9
/**
 * @brief Creates a new bitset data structure.
 * @details Allocates memory for a bitset that can contain values from the range `[0, num_values - 1]` and initializes it using bitset_init.
 * @param num_values Number of values the set can store.
 * @returns Pointer to a bitset data structure.
 * @remark Memory needs to be freed by bitset_free.
 */
bitset_t *bitset_new(bitset_index_t num_values)
{
    bitset_t *set = (bitset_t *) malloc(sizeof(bitset_t));
    if(set == NULL)
    {
        fprintf(stderr, "[bitset] Error: could not allocate memory to store bitset\n");
        exit(EXIT_FAILURE);
    }
    
    bitset_init(set, num_values);
    return set;
}
Example #10
/*
 * Initialize the default partition and kpreempt disp queue.
 */
void
cpupart_initialize_default(void)
{
	lgrp_id_t i;

	cp_list_head = &cp_default;
	cp_default.cp_next = &cp_default;
	cp_default.cp_prev = &cp_default;
	cp_default.cp_id = CP_DEFAULT;
	cp_default.cp_kp_queue.disp_maxrunpri = -1;
	cp_default.cp_kp_queue.disp_max_unbound_pri = -1;
	cp_default.cp_kp_queue.disp_cpu = NULL;
	cp_default.cp_gen = 0;
	cp_default.cp_loadavg.lg_cur = 0;
	cp_default.cp_loadavg.lg_len = 0;
	cp_default.cp_loadavg.lg_total = 0;
	for (i = 0; i < S_LOADAVG_SZ; i++) {
		cp_default.cp_loadavg.lg_loads[i] = 0;
	}
	DISP_LOCK_INIT(&cp_default.cp_kp_queue.disp_lock);
	cp_id_next = CP_DEFAULT + 1;
	cpupart_kstat_create(&cp_default);
	cp_numparts = 1;
	if (cp_max_numparts == 0)	/* allow for /etc/system tuning */
		cp_max_numparts = max_ncpus * 2 + 1;
	/*
	 * Allocate space for cp_default list of lgrploads
	 */
	cpupart_lpl_initialize(&cp_default);

	/*
	 * The initial lpl topology is created in a special lpl list
	 * lpl_bootstrap. It should be copied to cp_default.
	 * NOTE: lpl_topo_bootstrap() also updates CPU0 cpu_lpl pointer to point
	 *	 to the correct lpl in the cp_default.cp_lgrploads list.
	 */
	lpl_topo_bootstrap(cp_default.cp_lgrploads,
	    cp_default.cp_nlgrploads);


	cp_default.cp_attr = PSET_NOESCAPE;
	cp_numparts_nonempty = 1;
	/*
	 * Set t0's home
	 */
	t0.t_lpl = &cp_default.cp_lgrploads[LGRP_ROOTID];

	bitset_init(&cp_default.cp_cmt_pgs);
	bitset_init_fanout(&cp_default.cp_haltset, cp_haltset_fanout);

	bitset_resize(&cp_default.cp_haltset, max_ncpus);
}
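
A hedged sketch of what the haltset built above is for elsewhere in the dispatcher: a CPU atomically marks itself halted before sleeping and clears the bit on wakeup. bitset_atomic_add()/bitset_atomic_del() are the kernel bitset primitives paired with bitset_init_fanout(); the wrapper itself is hypothetical:

static void
cp_haltset_mark_sketch(cpupart_t *cp, cpu_t *c, boolean_t halted)
{
	if (halted)
		bitset_atomic_add(&cp->cp_haltset, c->cpu_seqid);
	else
		bitset_atomic_del(&cp->cp_haltset, c->cpu_seqid);
}
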
Example #11
/*
 * Create new device address map
 *
 * name:		map name (kstat unique)
 * mode:		style of address reports: per-address or fullset
 * map_opts:		map options
 * stable_usec:		# of quiescent microseconds before report/map is stable
 *
 * activate_arg:	address provider activation-callout private
 * activate_cb:		address provider activation callback handler
 * deactivate_cb:	address provider deactivation callback handler
 *
 * config_arg:		configuration-callout private
 * config_cb:		class configuration callout
 * unconfig_cb:		class unconfiguration callout
 *
 * damapp:		pointer to map handle (return)
 *
 * Returns:	DAM_SUCCESS
 *		DAM_EINVAL	Invalid argument(s)
 *		DAM_FAILURE	General failure
 */
int
damap_create(char *name, damap_rptmode_t mode, int map_opts,
    int stable_usec, void *activate_arg, damap_activate_cb_t activate_cb,
    damap_deactivate_cb_t deactivate_cb,
    void *config_arg, damap_configure_cb_t configure_cb,
    damap_unconfig_cb_t unconfig_cb,
    damap_t **damapp)
{
	dam_t *mapp;

	if (configure_cb == NULL || unconfig_cb == NULL || name == NULL)
		return (DAM_EINVAL);

	mapp = kmem_zalloc(sizeof (*mapp), KM_SLEEP);
	mapp->dam_options = map_opts;
	mapp->dam_stable_ticks = drv_usectohz(stable_usec);
	mapp->dam_size = 0;
	mapp->dam_rptmode = mode;
	mapp->dam_activate_arg = activate_arg;
	mapp->dam_activate_cb = (activate_cb_t)activate_cb;
	mapp->dam_deactivate_cb = (deactivate_cb_t)deactivate_cb;
	mapp->dam_config_arg = config_arg;
	mapp->dam_configure_cb = (configure_cb_t)configure_cb;
	mapp->dam_unconfig_cb = (unconfig_cb_t)unconfig_cb;
	mapp->dam_name = i_ddi_strdup(name, KM_SLEEP);
	mutex_init(&mapp->dam_lock, NULL, MUTEX_DRIVER, NULL);
	cv_init(&mapp->dam_sync_cv, NULL, CV_DRIVER, NULL);
	bitset_init(&mapp->dam_active_set);
	bitset_init(&mapp->dam_stable_set);
	bitset_init(&mapp->dam_report_set);
	*damapp = (damap_t *)mapp;

	DTRACE_PROBE5(damap__create,
	    char *, mapp->dam_name, damap_t *, mapp,
	    damap_rptmode_t, mode, int, map_opts, int, stable_usec);

	return (DAM_SUCCESS);
}
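
A hedged create/teardown sketch; damap_destroy() is the framework's matching destructor, and the map name, option values and callbacks here are hypothetical:

static int
damap_create_demo(void *sc, damap_configure_cb_t my_configure_cb,
    damap_unconfig_cb_t my_unconfig_cb)
{
	damap_t *mapp;

	if (damap_create("demo_tgtmap", DAMAP_REPORT_FULLSET, 0, MICROSEC,
	    NULL, NULL, NULL, sc, my_configure_cb, my_unconfig_cb,
	    &mapp) != DAM_SUCCESS)
		return (DAM_FAILURE);
	/* ... post address reports, let the map stabilize ... */
	damap_destroy(mapp);
	return (DAM_SUCCESS);
}
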
Example #12
int main(int argc, char** argv)
{
	
	int show_details = 0;

	printf("\nShow details,run: %s details.\n",argv[0]);
	if(argc > 1)
	{
		if(strcmp("details",argv[1]) == 0)
		{
			show_details = 1;	
		}
	}

	bitset *s = bitset_init(200);
	bitset_set_bit(s, 0);
	bitset_set_bit(s, 1);
	bitset_set_bit(s, 2);
	bitset_set_bit(s, 3);
	bitset_set_bit(s, 4);
	bitset_set_bit(s, 5);
	bitset_set_bit(s, 6);
	bitset_set_bit(s, 7);
	bitset_set_bit(s, 8);
	bitset_set_bit(s, 9);
	bitset_set_bit(s, 10);
	bitset_print(s);
	bitset_clear_bit(s, 1);
	bitset_clear_bit(s, 4);
	bitset_clear_bit(s, 7);
	bitset_print(s);
	
	size_t pos = bitset_get_first_unused_bit_pos(s);
	printf("pos : %zu\n", pos);
	bitset_set_bit(s, pos);
	pos = bitset_get_first_unused_bit_pos(s);
	printf("pos : %zu\n", pos);
	
	bitset_free(s);
	run(1);

    return (EXIT_SUCCESS);
}
Example #13
int fdevent_freebsd_kqueue_init(fdevents * ev)
{
	ev->type = FDEVENT_HANDLER_FREEBSD_KQUEUE;
#define SET(x) \
	ev->x = fdevent_freebsd_kqueue_##x;

	SET(free);
	SET(poll);
	SET(reset);

	SET(event_del);
	SET(event_add);

	SET(event_next_fdndx);
	SET(event_get_fd);
	SET(event_get_revent);

	ev->kq_fd = -1;

	ev->kq_results = calloc(ev->maxfds, sizeof(*ev->kq_results));
	ev->kq_bevents = bitset_init(ev->maxfds);

	/*
	 * check that kqueue works 
	 */

	if (-1 == (ev->kq_fd = kqueue()))
	{
		fprintf(stderr,
				"%s.%d: kqueue failed (%s), try to set server.event-handler = \"poll\" or \"select\"\n",
				__FILE__, __LINE__, strerror(errno));

		return -1;
	}

	close(ev->kq_fd);
	ev->kq_fd = -1;

	return 0;
}
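
For context, a hedged fragment of what kq_bevents is for in this backend: one bit per fd records whether a kevent filter is currently registered, so event_add can tell a fresh add from a modify. bitset_test_bit()/bitset_set_bit() are the bitset helpers from the same tree:

	if (!bitset_test_bit(ev->kq_bevents, fd))
	{
		bitset_set_bit(ev->kq_bevents, fd);
		/* ... EV_SET() + kevent() to register the new filter ... */
	}
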
Example #14
int fdevent_linux_rtsig_init(fdevents * ev)
{
	ev->type = FDEVENT_HANDLER_LINUX_RTSIG;
#define SET(x) \
	ev->x = fdevent_linux_rtsig_##x;

	SET(free);
	SET(poll);

	SET(event_del);
	SET(event_add);

	SET(event_next_fdndx);
	SET(fcntl_set);
	SET(event_get_fd);
	SET(event_get_revent);

	ev->signum = SIGRTMIN + 1;

	sigemptyset(&(ev->sigset));
	sigaddset(&(ev->sigset), ev->signum);
	sigaddset(&(ev->sigset), SIGIO);
	if (-1 == sigprocmask(SIG_BLOCK, &(ev->sigset), NULL))
	{
		fprintf(stderr,
				"%s.%d: sigprocmask failed (%s), try to set server.event-handler = \"poll\" or \"select\"\n",
				__FILE__, __LINE__, strerror(errno));

		return -1;
	}

	ev->in_sigio = 1;

	ev->sigbset = bitset_init(ev->maxfds);

	return 0;
}
Example #15
START_TEST(test_bitset_getset)
{
    int i, j;

    bitset_init(bs, NCOL1);
    for (i = 0, j = 0; i < NCOL1; i++) {
        if (i == cols[j]) {
            bitset_set(bs, i, 1);
            j++;
            ck_assert_int_eq(bs->count, j);
        } else { /* only bits specified are set */
            ck_assert_int_eq(bitset_get(bs, i), 0);
        }
    }

    for (j=0; j < 4; j++) { /* set these bits back to 0 */
        ck_assert_int_eq(bitset_get(bs, cols[j]), 1);
        bitset_set(bs, cols[j], 0);
        ck_assert_int_eq(bitset_get(bs, cols[j]), 0);
        ck_assert_int_eq(bs->count, 3 - j);
    }
}
Example #16
void test_do(test_context_t *test)
{
    
    bitset_t *set;
    int i;
    
    
    test_mark(test, "bitset functions");
    test_group_start(test, "Setup and sanity check");
    {
        
        set = bitset_init(SET_SIZE);
        test_not_null(test, "Sanity check: allocated bitset", set);
               
    }
    test_group_end(test);
    
    
    test_group_start(test, "set / get");
    {
        int set1[6] = { 0, 1, 3, 5, SET_SIZE -1, SET_SIZE - 3 };
        int set2[6] = { 2, 4, 6, 10, 11, SET_SIZE - 2 };
        
        /* set1 members are TRUE, set2 members are FALSE */
        for(i = 0; i < 6; i++) bitset_set(set, set1[i], TRUE);
        for(i = 0; i < 6; i++) bitset_set(set, set2[i], FALSE);
        
        /* now check it */        
        for(i = 0; i < 6; i++) {
            test_equal(test, "set1 members are TRUE", bitset_get(set, set1[i]), TRUE);
            test_equal(test, "set2 members are FALSE", bitset_get(set, set2[i]), FALSE);
        }
        
        /* reverse it and try it again */
        for(i = 0; i < 6; i++) bitset_set(set, set1[i], FALSE);
        for(i = 0; i < 6; i++) bitset_set(set, set2[i], TRUE);
        
        for(i = 0; i < 6; i++) {
            test_equal(test, "set1 members are FALSE (2)", bitset_get(set, set1[i]), FALSE);
            test_equal(test, "set2 members are TRUE  (2)", bitset_get(set, set2[i]), TRUE);
        }
    }
    test_group_end(test);
    
 
    test_group_start(test, "set all");
    {
        
        /* ALL TRUE */
        bitset_set_all(set, TRUE);
        bitset_set(set, 5, FALSE); /* clear one bit to catch stuck-at-TRUE bugs */
        
        for(i = 0; i < SET_SIZE; i++) {
            if(i != 5)
                test_equal(test, "set_all (TRUE)", bitset_get(set, i), TRUE);
            else
                test_equal(test, "set_all (one is FALSE)", bitset_get(set, i), FALSE);
        }
        
        /* ALL FALSE */
        bitset_set_all(set, FALSE);
        bitset_set(set, 31, TRUE); /* see above */
        
        for(i = 0; i < SET_SIZE; i++) {
            if(i != 31)
                test_equal(test, "set_all (FALSE)", bitset_get(set, i), FALSE);
            else
                test_equal(test, "set_all (one is TRUE)", bitset_get(set, i), TRUE);
        }
        
        
    }
    test_group_end(test);    

}
Example #17
/*
 * CMT class callback for a new CPU entering the system
 *
 * This routine operates on the CPU specific processor group data (for the CPU
 * being initialized). The argument "pgdata" is a reference to the CPU's PG
 * data to be constructed.
 *
 * cp->cpu_pg is used by the dispatcher to access the CPU's PG data; while
 * the CPU is being initialized it still references a "bootstrap" structure.
 * pg_cmt_cpu_init() and the routines it calls must therefore be careful to
 * operate only on the "pgdata" argument, and not cp->cpu_pg.
 */
static void
pg_cmt_cpu_init(cpu_t *cp, cpu_pg_t *pgdata)
{
	pg_cmt_t	*pg;
	group_t		*cmt_pgs;
	int		levels, level;
	pghw_type_t	hw;
	pg_t		*pg_cache = NULL;
	pg_cmt_t	*cpu_cmt_hier[PGHW_NUM_COMPONENTS];
	lgrp_handle_t	lgrp_handle;
	cmt_lgrp_t	*lgrp;
	cmt_lineage_validation_t	lineage_status;

	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(pg_cpu_is_bootstrapped(cp));

	if (cmt_sched_disabled)
		return;

	/*
	 * A new CPU is coming into the system.
	 * Interrogate the platform to see if the CPU
	 * has any performance or efficiency relevant
	 * sharing relationships
	 */
	cmt_pgs = &pgdata->cmt_pgs;
	pgdata->cmt_lineage = NULL;

	bzero(cpu_cmt_hier, sizeof (cpu_cmt_hier));
	levels = 0;
	for (hw = PGHW_START; hw < PGHW_NUM_COMPONENTS; hw++) {

		pg_cmt_policy_t	policy;

		/*
		 * We're only interested in the hw sharing relationships
		 * for which we know how to optimize.
		 */
		policy = pg_cmt_policy(hw);
		if (policy == CMT_NO_POLICY ||
		    pg_plat_hw_shared(cp, hw) == 0)
			continue;

		/*
		 * We will still create the PGs for hardware sharing
		 * relationships that have been blacklisted, but won't
		 * implement CMT thread placement optimizations against them.
		 */
		if (cmt_hw_blacklisted[hw] == 1)
			policy = CMT_NO_POLICY;

		/*
		 * Find (or create) the PG associated with
		 * the hw sharing relationship in which cp
		 * belongs.
		 *
		 * Determine if a suitable PG already
		 * exists, or if one needs to be created.
		 */
		pg = (pg_cmt_t *)pghw_place_cpu(cp, hw);
		if (pg == NULL) {
			/*
			 * Create a new one.
			 * Initialize the common...
			 */
			pg = (pg_cmt_t *)pg_create(pg_cmt_class_id);

			/* ... physical ... */
			pghw_init((pghw_t *)pg, cp, hw);

			/*
			 * ... and CMT specific portions of the
			 * structure.
			 */
			pg->cmt_policy = policy;

			/* CMT event callbacks */
			cmt_callback_init((pg_t *)pg);

			bitset_init(&pg->cmt_cpus_actv_set);
			group_create(&pg->cmt_cpus_actv);
		} else {
			ASSERT(IS_CMT_PG(pg));
		}

		/* Add the CPU to the PG */
		pg_cpu_add((pg_t *)pg, cp, pgdata);

		/*
		 * Ensure capacity of the active CPU group/bitset
		 */
		group_expand(&pg->cmt_cpus_actv,
		    GROUP_SIZE(&((pg_t *)pg)->pg_cpus));

		if (cp->cpu_seqid >=
		    bitset_capacity(&pg->cmt_cpus_actv_set)) {
			bitset_resize(&pg->cmt_cpus_actv_set,
			    cp->cpu_seqid + 1);
		}

		/*
		 * Build a lineage of CMT PGs for load balancing / coalescence
		 */
		if (policy & (CMT_BALANCE | CMT_COALESCE)) {
			cpu_cmt_hier[levels++] = pg;
		}

		/* Cache this for later */
		if (hw == PGHW_CACHE)
			pg_cache = (pg_t *)pg;
	}

	group_expand(cmt_pgs, levels);

	if (cmt_root == NULL)
		cmt_root = pg_cmt_lgrp_create(lgrp_plat_root_hand());

	/*
	 * Find the lgrp that encapsulates this CPU's CMT hierarchy
	 */
	lgrp_handle = lgrp_plat_cpu_to_hand(cp->cpu_id);
	if ((lgrp = pg_cmt_find_lgrp(lgrp_handle)) == NULL)
		lgrp = pg_cmt_lgrp_create(lgrp_handle);

	/*
	 * Ascendingly sort the PGs in the lineage by number of CPUs
	 */
	pg_cmt_hier_sort(cpu_cmt_hier, levels);

	/*
	 * Examine the lineage and validate it.
	 * This routine will also try to fix the lineage along with the
	 * rest of the PG hierarchy should it detect an issue.
	 *
	 * If it returns anything other than VALID or REPAIRED, an
	 * unrecoverable error has occurred, and we cannot proceed.
	 */
	lineage_status = pg_cmt_lineage_validate(cpu_cmt_hier, &levels, pgdata);
	if ((lineage_status != CMT_LINEAGE_VALID) &&
	    (lineage_status != CMT_LINEAGE_REPAIRED)) {
		/*
		 * In the case of an unrecoverable error where CMT scheduling
		 * has been disabled, assert that the under construction CPU's
		 * PG data has an empty CMT load balancing lineage.
		 */
		ASSERT((cmt_sched_disabled == 0) ||
		    (GROUP_SIZE(&(pgdata->cmt_pgs)) == 0));
		return;
	}

	/*
	 * For existing PGs in the lineage, verify that the parent is
	 * correct, as the generation in the lineage may have changed
	 * as a result of the sorting. Start the traversal at the top
	 * of the lineage, moving down.
	 */
	for (level = levels - 1; level >= 0; ) {
		int reorg;

		reorg = 0;
		pg = cpu_cmt_hier[level];

		/*
		 * Promote PGs at an incorrect generation into place.
		 */
		while (pg->cmt_parent &&
		    pg->cmt_parent != cpu_cmt_hier[level + 1]) {
			cmt_hier_promote(pg, pgdata);
			reorg++;
		}
		if (reorg > 0)
			level = levels - 1;
		else
			level--;
	}

	/*
	 * For each of the PGs in the CPU's lineage:
	 *	- Add an entry in the CPU sorted CMT PG group
	 *	  which is used for top down CMT load balancing
	 *	- Tie the PG into the CMT hierarchy by connecting
 *	  it to its parent and siblings.
	 */
	for (level = 0; level < levels; level++) {
		uint_t		children;
		int		err;

		pg = cpu_cmt_hier[level];
		err = group_add_at(cmt_pgs, pg, levels - level - 1);
		ASSERT(err == 0);

		if (level == 0)
			pgdata->cmt_lineage = (pg_t *)pg;

		if (pg->cmt_siblings != NULL) {
			/* Already initialized */
			ASSERT(pg->cmt_parent == NULL ||
			    pg->cmt_parent == cpu_cmt_hier[level + 1]);
			ASSERT(pg->cmt_siblings == &lgrp->cl_pgs ||
			    ((pg->cmt_parent != NULL) &&
			    pg->cmt_siblings == pg->cmt_parent->cmt_children));
			continue;
		}

		if ((level + 1) == levels) {
			pg->cmt_parent = NULL;

			pg->cmt_siblings = &lgrp->cl_pgs;
			children = ++lgrp->cl_npgs;
			if (cmt_root != lgrp)
				cmt_root->cl_npgs++;
		} else {
			pg->cmt_parent = cpu_cmt_hier[level + 1];

			/*
			 * A good parent keeps track of their children.
			 * The parent's children group is also the PG's
			 * siblings.
			 */
			if (pg->cmt_parent->cmt_children == NULL) {
				pg->cmt_parent->cmt_children =
				    kmem_zalloc(sizeof (group_t), KM_SLEEP);
				group_create(pg->cmt_parent->cmt_children);
			}
			pg->cmt_siblings = pg->cmt_parent->cmt_children;
			children = ++pg->cmt_parent->cmt_nchildren;
		}

		group_expand(pg->cmt_siblings, children);
		group_expand(&cmt_root->cl_pgs, cmt_root->cl_npgs);
	}

	/*
	 * Cache the chip and core IDs in the cpu_t->cpu_physid structure
	 * for fast lookups later.
	 */
	if (cp->cpu_physid) {
		cp->cpu_physid->cpu_chipid =
		    pg_plat_hw_instance_id(cp, PGHW_CHIP);
		cp->cpu_physid->cpu_coreid = pg_plat_get_core_id(cp);

		/*
		 * If this cpu has a PG representing shared cache, then set
		 * cpu_cacheid to that PG's logical id
		 */
		if (pg_cache)
			cp->cpu_physid->cpu_cacheid = pg_cache->pg_id;
	}

	/* CPU0 only initialization */
	if (is_cpu0) {
		is_cpu0 = 0;
		cpu0_lgrp = lgrp;
	}

}
Example #18
/*
 * Create a new partition.  On MP systems, this also allocates a
 * kpreempt disp queue for that partition.
 */
int
cpupart_create(psetid_t *psid)
{
	cpupart_t	*pp;

	ASSERT(pool_lock_held());

	pp = kmem_zalloc(sizeof (cpupart_t), KM_SLEEP);
	pp->cp_nlgrploads = lgrp_plat_max_lgrps();
	pp->cp_lgrploads = kmem_zalloc(sizeof (lpl_t) * pp->cp_nlgrploads,
	    KM_SLEEP);

	mutex_enter(&cpu_lock);
	if (cp_numparts == cp_max_numparts) {
		mutex_exit(&cpu_lock);
		kmem_free(pp->cp_lgrploads, sizeof (lpl_t) * pp->cp_nlgrploads);
		pp->cp_lgrploads = NULL;
		kmem_free(pp, sizeof (cpupart_t));
		return (ENOMEM);
	}
	cp_numparts++;
	/* find the next free partition ID */
	while (cpupart_find(CPTOPS(cp_id_next)) != NULL)
		cp_id_next++;
	pp->cp_id = cp_id_next++;
	pp->cp_ncpus = 0;
	pp->cp_cpulist = NULL;
	pp->cp_attr = 0;
	klgrpset_clear(pp->cp_lgrpset);
	pp->cp_kp_queue.disp_maxrunpri = -1;
	pp->cp_kp_queue.disp_max_unbound_pri = -1;
	pp->cp_kp_queue.disp_cpu = NULL;
	pp->cp_gen = 0;
	DISP_LOCK_INIT(&pp->cp_kp_queue.disp_lock);
	*psid = CPTOPS(pp->cp_id);
	disp_kp_alloc(&pp->cp_kp_queue, v.v_nglobpris);
	cpupart_kstat_create(pp);
	cpupart_lpl_initialize(pp);

	bitset_init(&pp->cp_cmt_pgs);

	/*
	 * Initialize and size the partition's bitset of halted CPUs.
	 */
	bitset_init_fanout(&pp->cp_haltset, cp_haltset_fanout);
	bitset_resize(&pp->cp_haltset, max_ncpus);

	/*
	 * Pause all CPUs while changing the partition list, to make sure
	 * the clock thread (which traverses the list without holding
	 * cpu_lock) isn't running.
	 */
	pause_cpus(NULL);
	pp->cp_next = cp_list_head;
	pp->cp_prev = cp_list_head->cp_prev;
	cp_list_head->cp_prev->cp_next = pp;
	cp_list_head->cp_prev = pp;
	start_cpus();
	mutex_exit(&cpu_lock);

	return (0);
}
Example #19
void nez_EmitInstruction(NezVMInstruction* ir, ByteCodeLoader *loader, ParsingContext context) {
  switch(ir->op) {
    case NEZVM_OP_JUMP:
    case NEZVM_OP_IFFAIL: {
      ir->arg = Loader_Read16(loader);
      break;
    }
    case NEZVM_OP_CALL: {
      ir->arg = Loader_Read16(loader);
      context->call_table[ir->arg] = Loader_Read32(loader);
      break;
    }
    case NEZVM_OP_CHAR: {
      ir->arg = loader->input[loader->info->pos++];
      break;
    }
    case NEZVM_OP_NOTCHAR: {
      ir->arg = Loader_Read16(loader);
      context->str_table[ir->arg].c = loader->input[loader->info->pos++];
      context->str_table[ir->arg].jump = Loader_Read32(loader);
      context->str_table[ir->arg].type = 0;
      break;
    }
    case NEZVM_OP_CHARMAP: {
      ir->arg = Loader_Read16(loader);
      int len = Loader_Read16(loader);
      context->set_table[ir->arg].set = (bitset_t *)malloc(sizeof(bitset_t));
      bitset_init(context->set_table[ir->arg].set);
      for (int i = 0; i < len; i++) {
        unsigned char c = loader->input[loader->info->pos++];
        bitset_set(context->set_table[ir->arg].set, c);
      }
      context->set_table[ir->arg].jump = Loader_Read32(loader);
      break;
    }
    case NEZVM_OP_OPTIONALCHARMAP:
    case NEZVM_OP_ZEROMORECHARMAP: {
      ir->arg = Loader_Read16(loader);
      assert(ir->arg >= 0 && ir->arg < context->set_table_size);
      int len = Loader_Read16(loader);
      context->set_table[ir->arg].set = (bitset_t *)malloc(sizeof(bitset_t));
      bitset_init(context->set_table[ir->arg].set);
      for (int i = 0; i < len; i++) {
        unsigned char c = loader->input[loader->info->pos++];
        bitset_set(context->set_table[ir->arg].set, c);
      }
      fprintf(stderr, "<%d> %d\n", ir->arg, context->set_table[ir->arg].jump);
      break;
    }
    case NEZVM_OP_STRING:
    case NEZVM_OP_NOTSTRING:
     {
      ir->arg = Loader_Read16(loader);
      context->str_table[ir->arg].str = Loader_ReadString(loader);
      context->str_table[ir->arg].jump = Loader_Read32(loader);
      context->str_table[ir->arg].type = 1;
      fprintf(stderr, "<%d> %s %d\n", ir->arg, context->str_table[ir->arg].str->text, context->str_table[ir->arg].jump);
      break;
    }
    // case NEZVM_OP_OPTIONALSTRING:
    //  {
    //   ir->arg = loader->input[loader->info->pos++];
    //   context->str_table[ir->arg].str = Loader_ReadString(loader);
    //   context->str_table[ir->arg].type = 1;
    //  }
    //  break;
  }
}
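
A hedged sketch of the matching run-time test: a CHARMAP opcode checks the current input byte against the set loaded above. bitset_get() is assumed to be the accessor paired with bitset_set() in this bitset implementation:

static int charmap_matches(ParsingContext context, int arg, unsigned char c)
{
  return bitset_get(context->set_table[arg].set, c);
}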