Code example #1
File: speed_test.hpp Project: royertiago/MAC6711-src
 int peek_key( std::mt19937 & rng ) {
     std::uniform_int_distribution<> new_index(0, keys.size() - 1);
     int index = new_index(rng);
     while( !keys[index].second )
         index = new_index(rng);
     return keys[index].first;
 }
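The surrounding keys container is not shown in this snippet. Below is a minimal, hedged driver that assumes keys is a std::vector<std::pair<int, bool>> whose second member marks a key as still available, and isolates the resample-until-available pattern on its own.

// Hedged, self-contained sketch; the container type and the seed are assumptions.
#include <random>
#include <utility>
#include <vector>

int main() {
    std::vector<std::pair<int, bool>> keys = {{10, true}, {20, false}, {30, true}};
    std::mt19937 rng(42);

    std::uniform_int_distribution<> new_index(0, keys.size() - 1);
    int index = new_index(rng);
    while (!keys[index].second)       // resample until an available key is hit
        index = new_index(rng);

    return keys[index].first == 20;   // 20 is unavailable, so this always returns 0
}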
Code example #2
value* ssa_rename::rename_def(node *n, value* v) {
	unsigned index;

	if (v->is_lds_access()) {
		index = new_index(lds_rw_count, v);
		set_index(rename_lds_rw_stack.top(), v, index);
	} else {
		index = new_index(def_count, v);
		set_index(rename_stack.top(), v, index);
	}
	value *r = sh.get_value_version(v, index);
	return r;
}
Code example #3
value* ssa_rename::rename_use(node *n, value* v) {
	if (v->version)
		return v;
	unsigned index;
	if (v->is_lds_access()) {
		index = get_index(rename_lds_rw_stack.top(), v);
	} else if (v->is_lds_oq()) {
		index = new_index(lds_oq_count, v);
		set_index(rename_lds_oq_stack.top(), v, index);
	} else {
		index = get_index(rename_stack.top(), v);
	}

	v = sh.get_value_version(v, index);

	// if (alu) instruction is predicated and source arg comes from psi node
	// (that is, from another predicated instruction through its psi node),
	// we can try to select the corresponding source value directly
	if (n->pred && v->def && v->def->subtype == NST_PSI) {
		assert(n->subtype == NST_ALU_INST);
		alu_node *an = static_cast<alu_node*>(n);
		node *pn = v->def;
		// FIXME make it more generic ???
		if (pn->src.size() == 6) {
			if (pn->src[3] == n->pred) {
				value* ps = sh.get_pred_sel(an->bc.pred_sel - PRED_SEL_0);
				if (pn->src[4] == ps)
					return pn->src[5];
				else
					return pn->src[2];
			}
		}
	}
	return v;
}
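Examples #2 and #3 call new_index, set_index and get_index without showing them. The sketch below is an illustrative guess at what that per-value version bookkeeping could look like; the map-based storage and names are assumptions, not the r600 shader backend's real helpers.

// Illustrative only: a per-value SSA version counter plus a rename frame.
#include <map>

using value_key = const void*;                      // stand-in for value*

unsigned new_index(std::map<value_key, unsigned>& count, value_key v) {
    return ++count[v];                              // next version number for v
}

void set_index(std::map<value_key, unsigned>& frame, value_key v, unsigned idx) {
    frame[v] = idx;                                 // version currently visible in this scope
}

unsigned get_index(const std::map<value_key, unsigned>& frame, value_key v) {
    auto it = frame.find(v);
    return it == frame.end() ? 0u : it->second;     // 0 means "not renamed yet"
}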
Code example #4
File: index.c Project: edgar-pek/PerspicuOS
int
index_read(FILE *fp, PkgNodePtr papa)
{
    char name[127], pathto[255], prefix[255], comment[255], descr[127], maint[127], cats[511], deps[2048 * 8];
    int volume;
    PkgNodePtr i;

    while (index_parse(fp, name, pathto, prefix, comment, descr, maint, cats, deps, &volume) != EOF) {
	char *cp, *cp2, tmp[1024];
	IndexEntryPtr idx;

	idx = new_index(name, pathto, prefix, comment, descr, maint, deps, volume);
	/* For now, we only add things to menus if they're in categories.  Keywords are ignored */
	for (cp = strcpy(tmp, cats); (cp2 = strchr(cp, ' ')) != NULL; cp = cp2 + 1) {
	    *cp2 = '\0';
	    index_register(papa, cp, idx);
	}
	index_register(papa, cp, idx);

	/* Add to special "All" category */
	index_register(papa, "All", idx);
    }

    /* Adjust dependency counts */
    for (i = papa->kids; i != NULL; i = i->next)
	if (strcmp(i->name, "All") == 0)
	    break;
    for (i = i->kids; i != NULL; i = i->next)
	if (((IndexEntryPtr)i->data)->installed)
	    index_recorddeps(TRUE, papa, i->data);

    return 0;
}
Code example #5
/**
 * @brief Changes the default animation name of the sprite.
 * @param default_animation_name The new default animation name.
 */
void SpriteModel::set_default_animation_name(
    const QString& default_animation_name) {

  QString old_default_animation_name = get_default_animation_name();

  if (default_animation_name == old_default_animation_name) {
    return;
  }

  sprite.set_default_animation_name(default_animation_name.toStdString());

  emit default_animation_changed(
        old_default_animation_name, default_animation_name);

  // Notify data change
  Index old_index(old_default_animation_name);
  Index new_index(default_animation_name);

  if (animation_exists(old_index)) {
    QModelIndex model_index = get_model_index(old_index);
    emit dataChanged(model_index, model_index);
  }

  if (animation_exists(new_index)) {
    QModelIndex model_index = get_model_index(new_index);
    emit dataChanged(model_index, model_index);
  }
}
Code example #6
File: index.c Project: NSSX/fdfgt
void draw_with_tab_on_img(char ***coord, int i, int maxline, t_struct *param)
{
  int index;
  int futurindex;

  futurindex = 0;
  index = 0;
  while(index + 1 < i)
    {
      if(index + 1 < i)
        {
          if(coord[index + 1][2][0] != '\n')
            first_part(coord, index, param);
          else
            futurindex++;
        }
      if(index + maxline + 1 < i)
        {
          if(coord[index + maxline + 1][2][0] != '\n')
            second_part(coord, index, maxline, param);
          else
            futurindex++;
        }
      index = new_index(index, futurindex);
      futurindex = new_futurindex(index, futurindex);
    }
}
Code example #7
 std::string generate_default(index_tuple const & index) const
 {
   std::string rhs = tree_parsing::evaluate_expression(*info_.statement, info_.root_idx, index, 0, *info_.mapping, tree_parsing::RHS_NODE_TYPE);
   std::string new_i = index.i + "- ((" + rhs + "<0)?" + rhs + ":0)";
   std::string new_j = index.i + "+ ((" + rhs + ">0)?" + rhs + ":0)";
   index_tuple new_index(new_i,index.bound0,new_j ,index.bound0);
   return tree_parsing::evaluate_expression(*info_.statement, info_.root_idx, new_index, 0, *info_.mapping, tree_parsing::LHS_NODE_TYPE);
 }
Code example #8
 std::string generate_default(index_tuple const & index) const
 {
   std::string rhs = tree_parsing::evaluate_expression(*info_.statement, info_.root_idx, index, 0, *info_.mapping, tree_parsing::RHS_NODE_TYPE);
   std::string new_i = index.i + "+ ((" + rhs + "<0)?" + rhs + ":0)";
   std::string new_j = index.j + "- ((" + rhs + ">0)?" + rhs + ":0)";
   index_tuple new_index("min("+index.i+","+index.j+")", index.bound0);
   std::string lhs = tree_parsing::evaluate_expression(*info_.statement, info_.root_idx, new_index, 0, *info_.mapping, tree_parsing::LHS_NODE_TYPE);
   return "((" + new_i + ")!=(" + new_j + "))?0:"+lhs;
 }
Code example #9
File: speed_test.hpp Project: royertiago/MAC6711-src
    int get_key( std::mt19937 & rng ) {
        std::uniform_int_distribution<> new_index(0, keys.size() - 1);
        int index = new_index(rng);
        while( !keys[index].second )
            index = new_index(rng);

        int key = keys[index].first;
        keys[index].second = false;
        available_keys--;

        // Compact the vector: drop consumed keys (the erase-remove idiom needs
        // the end iterator as well, otherwise only a single element is erased)
        if( keys.size() > 2 * available_keys )
            keys.erase( std::remove_if( keys.begin(), keys.end(),
                        [](auto x){ return !x.second; }
                        ), keys.end() );

        return key;
    }
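The compaction step above relies on the erase-remove idiom; the small self-contained example below shows the same two-step remove_if-then-erase call on a plain vector of pairs.

// Self-contained illustration of the erase-remove idiom used above.
#include <algorithm>
#include <utility>
#include <vector>

int main() {
    std::vector<std::pair<int, bool>> keys = {{1, true}, {2, false}, {3, true}};

    // remove_if shifts the kept elements to the front and returns the new
    // logical end; erase then trims the leftover tail in a single call.
    keys.erase(std::remove_if(keys.begin(), keys.end(),
                              [](const std::pair<int, bool>& x) { return !x.second; }),
               keys.end());

    return keys.size() == 2 ? 0 : 1;   // {1, true} and {3, true} remain
}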
Code example #10
File: colvargrid.cpp Project: BrianMoths/lammps
void colvar_grid_gradient::write_1D_integral(std::ostream &os)
{
  cvm::real bin, min, integral;
  std::vector<cvm::real> int_vals;

  os << "#       xi            A(xi)\n";

  if ( cv.size() != 1 ) {
    cvm::fatal_error("Cannot write integral for multi-dimensional gradient grids.");
  }

  integral = 0.0;
  int_vals.push_back( 0.0 );
  min = 0.0;

  // correction for periodic colvars, so that the PMF is periodic
  cvm::real corr;
  if ( periodic[0] ) {
    corr = average();
  } else {
    corr = 0.0;
  }

  for (std::vector<int> ix = new_index(); index_ok(ix); incr(ix)) {

    if (samples) {
      size_t const samples_here = samples->value(ix);
      if (samples_here)
        integral += (value(ix) / cvm::real(samples_here) - corr) * cv[0]->width;
    } else {
      integral += (value(ix) - corr) * cv[0]->width;
    }

    if ( integral < min ) min = integral;
    int_vals.push_back( integral );
  }

  bin = 0.0;
  for ( int i = 0; i < nx[0]; i++, bin += 1.0 ) {
    os << std::setw(10) << cv[0]->lower_boundary.real_value + cv[0]->width * bin << " "
       << std::setw(cvm::cv_width)
       << std::setprecision(cvm::cv_prec)
       << int_vals[i] - min << "\n";
  }

  os << std::setw(10) << cv[0]->lower_boundary.real_value + cv[0]->width * bin << " "
     << std::setw(cvm::cv_width)
     << std::setprecision(cvm::cv_prec)
     << int_vals[nx[0]] - min << "\n";

  return;
}
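The main loop accumulates a running integral of the gradient and the output loop subtracts the minimum, so the reported profile is non-negative. The hedged sketch below assumes a plain vector of gradient samples and a uniform bin width, and shows only that accumulate-then-shift step.

// Hedged sketch: plain doubles instead of the colvar grid types.
#include <algorithm>
#include <vector>

std::vector<double> integrate_and_shift(const std::vector<double>& grad, double width) {
    std::vector<double> profile(1, 0.0);       // profile starts at zero
    double integral = 0.0;
    for (double g : grad) {
        integral += g * width;                 // accumulate gradient * bin width
        profile.push_back(integral);
    }
    double min = *std::min_element(profile.begin(), profile.end());
    for (double& v : profile)
        v -= min;                              // anchor the minimum of the profile at zero
    return profile;
}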
Code example #11
File: main.cpp Project: epicsdeb/channelarchiver
// Copy samples from the archive with index_name
// to a new index copy_name.
// Uses all samples in the source archive, or the range [start ... end).
void copy(const stdString &index_name, const stdString &copy_name,
          int RTreeM, const epicsTime *start, const epicsTime *end,
          const stdString &single_name)
{
    IndexFile               index(RTreeM), new_index(RTreeM);
    IndexFile::NameIterator names;
    size_t                  channel_count = 0, value_count = 0, back_count = 0;
    BenchTimer              timer;
    stdString               dir1, dir2;
    Filename::getDirname(index_name, dir1);
    Filename::getDirname(copy_name, dir2);
    if (dir1 == dir2)
    {
        printf("You have to assert that the new index (%s)\n"
               "is in a  directory different from the old index\n"
               "(%s)\n", copy_name.c_str(), index_name.c_str());
        return;
    }
    index.open(index_name, true);
    new_index.open(copy_name, false);
    if (verbose)
        printf("Copying values from '%s' to '%s'\n",
               index_name.c_str(), copy_name.c_str());
    RawDataReader reader(index);
    if (single_name.empty())
    {
        bool ok = index.getFirstChannel(names);
        while (ok)
        {
            copy_channel(names.getName(), start, end, index, reader,
                         new_index, channel_count, value_count, back_count);
            ok = index.getNextChannel(names);
        }    
    }
    else
        copy_channel(single_name, start, end, index, reader,
                     new_index, channel_count, value_count, back_count);
    new_index.close();
    index.close();
    timer.stop();
    if (verbose)
    {
        printf("Total: %lu channels, %lu values\n",
               (unsigned long) channel_count, (unsigned long) value_count);
        printf("Skipped %lu back-in-time values\n",
               (unsigned long) back_count);
        printf("Runtime: %s\n", timer.toString().c_str());
    }
}
Code example #12
void NetCdfConfigureDialog::reverseNorthSouth(double* data, size_t width, size_t height)
{
    double* cp_array = new double[width*height];

    for (size_t i=0; i<height; i++)
    {
        for (size_t j=0; j<width; j++)
        {
            size_t old_index((width*height)-(width*(i+1)));
            size_t new_index(width*i);
            cp_array[new_index+j] = data[old_index+j];
        }
    }

    size_t length(height*width);
    for (size_t i=0; i<length; i++)
        data[i] = cp_array[i];

    delete[] cp_array;
}
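reverseNorthSouth stages every row in a temporary buffer and then copies the whole array back. A vertical flip can also be done in place by swapping rows from both ends; the sketch below shows that alternative under the same row-major raster layout, purely as a design note.

// Alternative sketch: flip rows in place with std::swap_ranges.
#include <algorithm>
#include <cstddef>

void reverse_north_south_in_place(double* data, std::size_t width, std::size_t height) {
    if (height < 2)
        return;
    for (std::size_t top = 0, bottom = height - 1; top < bottom; ++top, --bottom)
        std::swap_ranges(data + top * width,
                         data + (top + 1) * width,    // one full row
                         data + bottom * width);
}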
Code example #13
File: RCSTREES.C Project: daemqn/Atari_ST_Sources
LOCAL LONG ins_tree(LONG stree, WORD sobj, WORD kind, WORD dobj, WORD dx, WORD dy)
{
	LONG	dtree;
	BYTE	name[9];

	dtree = copy_tree(stree, sobj, TRUE);
	unique_name(&name[0], "TREE%hd", LWGET(RSH_NTREE(head)) + 1);
	if (new_index( (BYTE *) dtree, kind, &name[0]) == NIL)
	{
		hndl_alert(1, string_addr(STNFULL));
		return (-1L);
	}
	else
	{
		add_trindex(dtree);
		dobj = mov_tree(LWGET(RSH_NTREE(head)), dobj, dx, dy);
		select_tree(ad_view, trunpan_f(dobj));
		name_tree(dobj);
		dselct_tree(ad_view, trunpan_f(dobj));
		return (dtree);
	}
}
Code example #14
File: spmchash.cpp Project: 0x1997/concurrentqueue
 inline void insert_index_entry(IndexEntry*& idxEntry, std::uint64_t key)
 {
     auto localIndex = index.load(std::memory_order_relaxed);        // We're the only writer thread, relaxed is OK
     auto newTail = (localIndex->tail.load(std::memory_order_relaxed) + 1) & (localIndex->capacity - 1);
     idxEntry = localIndex->index[newTail];
     if (idxEntry->key.load(std::memory_order_relaxed) == INVALID_KEY ||
         idxEntry->value.load(std::memory_order_relaxed) == nullptr) {
         
         idxEntry->key.store(key, std::memory_order_relaxed);
         localIndex->tail.store(newTail, std::memory_order_release);
         return;
     }
     
     // No room in the old index, try to allocate another one!
     new_index();
     localIndex = index.load(std::memory_order_relaxed);
     newTail = (localIndex->tail.load(std::memory_order_relaxed) + 1) & (localIndex->capacity - 1);
     idxEntry = localIndex->index[newTail];
     assert(idxEntry->key.load(std::memory_order_relaxed) == INVALID_KEY);
     idxEntry->key.store(key, std::memory_order_relaxed);
     localIndex->tail.store(newTail, std::memory_order_release);
 }
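Both the fast path and the retry path advance the tail with & (localIndex->capacity - 1), which only behaves like a modulo when the capacity is a power of two. The small sketch below spells out that assumption.

// Assumes a power-of-two capacity, so (tail + 1) & (capacity - 1) wraps like % capacity.
#include <cassert>
#include <cstdint>

std::uint64_t advance_tail(std::uint64_t tail, std::uint64_t capacity) {
    assert(capacity != 0 && (capacity & (capacity - 1)) == 0);   // power of two
    return (tail + 1) & (capacity - 1);                          // cheap modular increment
}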
Code example #15
File: RCSDATA.C Project: daemqn/Atari_ST_Sources
VOID get_obname(BYTE *name, LONG ntree, WORD nobj)
{
	WORD	where;
	LONG	addr;

	addr = ntree + nobj * sizeof(OBJECT);
	where = find_value((BYTE *) addr);
	if (where != NIL)
	{
		if (!name[0] || name[0] == '@')
			del_index(where);
		else
			strcpy(get_name(where), name);
	}
	else
	{
		if (name[0] && name[0] != '@')
		{
			where = new_index((BYTE *)addr, OBJKIND, name);
			if (where == NIL)
				hndl_alert(1, string_addr(STNFULL));
		}
	}
}
Code example #16
File: idngram2lm.c Project: 10v/cmusphinx
int main(int argc, char **argv) {

  int i,j;
  ng_t* ng;
  int verbosity;
  int mem_alloc_method; /* Method used to decide how much memory to 
			   allocate for count tables */
  int buffer_size;
  flag is_ascii;
  ngram current_ngram;
  ngram previous_ngram;
  count_t *ng_count; /* Array indicating the number of occurrences of
			   the current 1-gram, 2-gram, ..., n-gram.
			   Size depends on #define in general.h */
  int nlines;
  int pos_of_novelty;
  int prev_id1;
  flag contains_unks;
  int mem_alloced;

  flag displayed_oov_warning; /** Display OOV warning 
			       */

  /*  ------------------  Process command line --------------------- */

  report_version(&argc,argv);

  if (argc == 1 || pc_flagarg(&argc, argv,"-help")) {    
    /* Display help message */    
    help_message();
    exit(1);
  }

  verbosity = pc_intarg(&argc, argv,"-verbosity",DEFAULT_VERBOSITY);

  /* Initialization */
  {
    ng=init_ng(
	    &argc,
	    argv,
	    verbosity
	    );
    
    mem_alloc_method = init_alloc_method(ng, &argc, argv, &buffer_size);
    
    if (!strcmp(ng->id_gram_filename,"-") && mem_alloc_method == TWO_PASSES)
      quit(-1,"Error: If idngram is read from stdin, then cannot use -calc_mem option.\n");
    
    is_ascii = set_lmformat(pc_flagarg(&argc,argv,"-ascii_input"),
			    pc_flagarg(&argc,argv,"-bin_input"),
			    ng);  

    /* Report parameters */
    report_param(verbosity,ng,
		 is_ascii, mem_alloc_method, buffer_size);

    pc_report_unk_args(&argc,argv,verbosity);

  }

  /* --------------- Read in the vocabulary -------------- */
  read_vocab(ng,verbosity);
       		     
  /* --------------- Allocate space for the table_size array --------- */
  init_ng_table_size(ng, 
		     mem_alloc_method,
		     is_ascii,
		     verbosity,
		     buffer_size
		     );

  /* ----------- Allocate memory for tree structure -------------- */

  ng->count = NULL;
  ng->count4 = NULL;
  ng->marg_counts = NULL;
  ng->marg_counts4 = NULL;
  ng->count_table = NULL;

  ng->count = (count_ind_t **) rr_malloc(sizeof(count_ind_t *)*ng->n);
  ng->count4 = (count_t **) rr_malloc(sizeof(count_t *)*ng->n);    
  ng->count_table = (count_t **) rr_malloc(sizeof(count_t *)*ng->n);

  if (ng->four_byte_counts) {
    ng->marg_counts4 = (count_t *) rr_calloc(sizeof(count_t), ng->table_sizes[0]);

  }else {
    for (i=0;i<=ng->n-1;i++) 
      ng->count_table[i] = (count_t *) rr_calloc(ng->count_table_size+1,
						sizeof(count_t));

    ng->marg_counts = (count_ind_t *) rr_calloc(sizeof(count_ind_t),ng->table_sizes[0]);
    fprintf(stderr, "table_size %d\n",ng->table_sizes[0]);
    fflush(stderr);
  }

  ng->word_id = (id__t **) rr_malloc(sizeof(id__t *)*ng->n);

  if (ng->four_byte_alphas) {
    ng->bo_weight4 = (four_byte_t **) rr_malloc(sizeof(four_byte_t *)*ng->n);
    ng->bo_weight4[0] = (four_byte_t *) rr_malloc(sizeof(four_byte_t)*
						ng->table_sizes[0]);
  }else {
    ng->bo_weight = (bo_weight_t **) rr_malloc(sizeof(bo_weight_t *)*ng->n);
    ng->bo_weight[0] = (bo_weight_t *) rr_malloc(sizeof(bo_weight_t)*
						ng->table_sizes[0]);
  }

  ng->ind = (index__t **)  rr_malloc(sizeof(index__t *)*ng->n);

  /* First table */
  if (ng->four_byte_counts) 
    ng->count4[0] = (count_t *) rr_calloc(ng->table_sizes[0],sizeof(count_t));
  else 
    ng->count[0] = (count_ind_t *) rr_calloc(ng->table_sizes[0],sizeof(count_ind_t));

  ng->uni_probs = (uni_probs_t *) rr_malloc(sizeof(uni_probs_t)*
					   ng->table_sizes[0]);
  ng->uni_log_probs = (uni_probs_t *) rr_malloc(sizeof(uni_probs_t)*
					       ng->table_sizes[0]);

  if (ng->n >=2) 
    ng->ind[0] = (index__t *) rr_calloc(ng->table_sizes[0],sizeof(index__t));

  for (i=1;i<=ng->n-2;i++) {    
    ng->word_id[i] = (id__t *) rr_malloc(sizeof(id__t)*ng->table_sizes[i]);

    if (ng->four_byte_counts) 
      ng->count4[i] = (count_t *) rr_malloc(sizeof(count_t)*ng->table_sizes[i]);
    else 
      ng->count[i] = (count_ind_t *) rr_malloc(sizeof(count_ind_t)*ng->table_sizes[i]);

    if (ng->four_byte_alphas) 
      ng->bo_weight4[i] = (four_byte_t *) rr_malloc(sizeof(four_byte_t)*ng->table_sizes[i]);
    else 
      ng->bo_weight[i] = (bo_weight_t *) rr_malloc(sizeof(bo_weight_t)*ng->table_sizes[i]);
    
    ng->ind[i] = (index__t *) rr_malloc(sizeof(index__t)*ng->table_sizes[i]);

    mem_alloced = sizeof(count_ind_t) + sizeof(bo_weight_t) + 
		sizeof(index__t) + sizeof(id__t);
    
    if (ng->four_byte_alphas) 
      mem_alloced += 4;
   
    mem_alloced *= ng->table_sizes[i];
    
    pc_message(verbosity,2,"Allocated %d bytes to table for %d-grams.\n",
	       mem_alloced,i+1);
    
  }

  ng->word_id[ng->n-1] = (id__t *) 
    rr_malloc(sizeof(id__t)*ng->table_sizes[ng->n-1]);

  if (ng->four_byte_counts) 
    ng->count4[ng->n-1] = (count_t *) rr_malloc(sizeof(count_t)*ng->table_sizes[ng->n-1]);    
  else 
    ng->count[ng->n-1] = (count_ind_t *) rr_malloc(sizeof(count_ind_t)*ng->table_sizes[ng->n-1]);

  pc_message(verbosity,2,"Allocated (%d+%d) bytes to table for %d-grams.\n",
	     ng->four_byte_counts?sizeof(count_t):sizeof(count_ind_t),
	     sizeof(id__t)*ng->table_sizes[ng->n-1],ng->n);
  
  /* Allocate memory for table for first-byte of indices */

  ng_allocate_ptr_table(ng,NULL,0);

  /* Allocate memory for alpha array */

  ng->alpha_array = (double *) rr_malloc(sizeof(double)*ng->out_of_range_alphas);
  ng->size_of_alpha_array = 0;

  /* Allocate memory for frequency of frequency information */

  ng->freq_of_freq = (fof_t **) rr_malloc(sizeof(fof_t *)*ng->n);

  NG_DISC_METH(ng)->allocate_freq_of_freq(ng);

  /* Read n-grams into the tree */
  pc_message(verbosity,2,"Processing id n-gram file.\n");
  pc_message(verbosity,2,"20,000 n-grams processed for each \".\", 1,000,000 for each line.\n");

  /* Allocate space for ngrams id arrays */

  current_ngram.id_array = (id__t *) rr_calloc(ng->n,sizeof(id__t));
  previous_ngram.id_array = (id__t *) rr_calloc(ng->n,sizeof(id__t));
  current_ngram.n = ng->n;
  previous_ngram.n = ng->n;
  
  ng->num_kgrams = (ngram_sz_t *) rr_calloc(ng->n,sizeof(ngram_sz_t));
  ng_count = (count_t *) rr_calloc(ng->n,sizeof(count_t));
  nlines = 1;
  ng->n_unigrams = 0;

  /* Process first n-gram */  
  get_ngram(ng->id_gram_fp,&current_ngram,is_ascii);
  contains_unks = ngram_chk_contains_unks(&current_ngram,ng->n);

  /* Skip over any unknown words.  They will come first, because <UNK>
     always has a word ID of zero. */
  while (ng->vocab_type == CLOSED_VOCAB && contains_unks){
    /* Stop looking if there are no more N-Grams.  Of course, this
       means training will fail, since there are no unigrams. */
    if (get_ngram(ng->id_gram_fp,&current_ngram,is_ascii) == 0)
      break;
    contains_unks = ngram_chk_contains_unks(&current_ngram,ng->n);
  }

  for (i=0;i<=ng->n-2;i++) {
    ng->ind[i][0] = new_index(0,ng->ptr_table[i],&(ng->ptr_table_size[i]),0);
    ng->word_id[i+1][0] = current_ngram.id_array[i+1];
    ng->num_kgrams[i+1]++;
    ng_count[i] = current_ngram.count;
  }

  ng_count[0] = current_ngram.count;

  NG_DISC_METH(ng)->update_freq_of_freq(ng,ng->n-1,current_ngram.count);

  store_normal_count(ng,0,current_ngram.count,ng->n-1);

  if (current_ngram.count <= ng->cutoffs[ng->n-2]) 
    ng->num_kgrams[ng->n-1]--;

  ngram_copy(&previous_ngram,&current_ngram,ng->n);

  prev_id1 = current_ngram.id_array[0];
    
  displayed_oov_warning = 0;

  while (!rr_feof(ng->id_gram_fp)) {

    if (get_ngram(ng->id_gram_fp,&current_ngram,is_ascii)) {

      if (ng->vocab_type == CLOSED_VOCAB)
	contains_unks=ngram_chk_contains_unks(&current_ngram,ng->n);
    
      if (!contains_unks || ng->vocab_type != CLOSED_VOCAB) {

	/* Test for where this ngram differs from last - do we have an
	   out-of-order ngram? */
	pos_of_novelty = ngram_find_pos_of_novelty(&current_ngram,&previous_ngram,ng->n,nlines);
    
	nlines++; 
	show_idngram_nlines(nlines, verbosity);
    
	/* Add new n-gram as soon as it is encountered */
	/* If all of the positions 2,3,...,n of the n-gram are context
	   cues then ignore the n-gram. */
    
	if (ng->n > 1) {
	  NG_DISC_METH(ng)->update_freq_of_freq(ng,ng->n-1,current_ngram.count);
	        
	  store_normal_count(ng,ng->num_kgrams[ng->n-1],current_ngram.count,ng->n-1);
	  
	  ng->word_id[ng->n-1][ng->num_kgrams[ng->n-1]] = current_ngram.id_array[ng->n-1];
	  ng->num_kgrams[ng->n-1]++;	  
	  
	  if (ng->num_kgrams[ng->n-1] >= ng->table_sizes[ng->n-1])
	    quit(-1,"\nMore than %d %d-grams needed to be stored. Rerun with a higher table size.\n",ng->table_sizes[ng->n-1],ng->n);
	}
	/* Deal with new 2,3,...,(n-1)-grams */
      
	for (i=ng->n-2;i>=MAX(1,pos_of_novelty);i--) {

	  NG_DISC_METH(ng)->update_freq_of_freq(ng,i,ng_count[i]);
	  
	  if (ng_count[i] <= ng->cutoffs[i-1]) 
	    ng->num_kgrams[i]--;
	  else
	    store_normal_count(ng,ng->num_kgrams[i]-1,ng_count[i],i);

	  ng_count[i] = current_ngram.count;
	  ng->word_id[i][ng->num_kgrams[i]] = current_ngram.id_array[i];
	  ng->ind[i][ng->num_kgrams[i]] = new_index(ng->num_kgrams[i+1]-1,
						    ng->ptr_table[i],
						    &(ng->ptr_table_size[i]),
						    ng->num_kgrams[i]);
	  ng->num_kgrams[i]++;
	
	  if (ng->num_kgrams[i] >= ng->table_sizes[i])
	    quit(-1,"More than %d %d-grams needed to be stored. Rerun with a higher table size.\n",ng->table_sizes[i],i+1);	  
	}
      
	for (i=0;i<=pos_of_novelty-1;i++) 
	  ng_count[i] += current_ngram.count;
      
	/* Deal with new 1-grams */
      
	if (pos_of_novelty == 0) {
	  if (ng->n>1) {
	    for (i = prev_id1 + 1; i <= current_ngram.id_array[0]; i++) {
	      ng->ind[0][i] = new_index(ng->num_kgrams[1]-1,
				       ng->ptr_table[0],
				       &(ng->ptr_table_size[0]),
				       i);
	    }
	    prev_id1 = current_ngram.id_array[0];
	  }

	  NG_DISC_METH(ng)->update_freq_of_freq(ng,0,ng_count[0]);

	  if (!ng->context_cue[previous_ngram.id_array[0]]) {
	    ng->n_unigrams += ng_count[0];
	    store_normal_count(ng,previous_ngram.id_array[0],ng_count[0],0);
	  }

	  store_marginal_count(ng,previous_ngram.id_array[0],ng_count[0],0);
		      
	  ng_count[0] = current_ngram.count;
	}

	if (current_ngram.count <= ng->cutoffs[ng->n-2]) 
	  ng->num_kgrams[ng->n-1]--;

	ngram_copy(&previous_ngram,&current_ngram,ng->n);

      }else {
	if (!displayed_oov_warning){
	  pc_message(verbosity,2,"Warning : id n-gram stream contains OOV's (n-grams will be ignored).\n");
	  displayed_oov_warning = 1;
	}
      }
    }
  }

  rr_iclose(ng->id_gram_fp);

  for (i=ng->n-2;i>=1;i--) {

    NG_DISC_METH(ng)->update_freq_of_freq(ng,i,ng_count[i]);

    if (ng_count[i] <= ng->cutoffs[i-1]) 
      ng->num_kgrams[i]--;
    else 
      store_normal_count(ng,ng->num_kgrams[i]-1,ng_count[i],i);
      
  }
  
  NG_DISC_METH(ng)->update_freq_of_freq(ng,0,ng_count[0]);

  if (!ng->context_cue[current_ngram.id_array[0]]) {
    ng->n_unigrams += ng_count[0];
    store_normal_count(ng,current_ngram.id_array[0],ng_count[0],0);
  }

  store_marginal_count(ng,current_ngram.id_array[0],ng_count[0],0);

  if (ng->n>1) {
    for (i=current_ngram.id_array[0]+1;i<=ng->vocab_size;i++)
      ng->ind[0][i] = new_index(ng->num_kgrams[1],
				ng->ptr_table[0],
				&(ng->ptr_table_size[0]),
				current_ngram.id_array[0]);
  }

  /* The idngram reading is completed at this point */
  pc_message(verbosity,2,"\n");

  /* Impose a minimum unigram count, if required */

  if (ng->min_unicount > 0) {

    int nchanged= 0;

    for (i=ng->first_id;i<=ng->vocab_size;i++) {
      if ((return_count(ng->four_byte_counts,
			ng->count_table[0],
			ng->count[0],
			ng->count4[0],
			i) < ng->min_unicount) && !ng->context_cue[i]) {

	/* There was a bug in V2's switch.  Look at segment for ABSOLUTE */
	NG_DISC_METH(ng)->reduce_ug_freq_of_freq(ng,i);
	ng->n_unigrams += (ng->min_unicount - ng->count[0][i]);
	store_normal_count(ng,i,ng->min_unicount,0);
	nchanged++;
      }
    }

    if (nchanged > 0) 
      pc_message(verbosity,2,
		 "Unigram counts of %d words were bumped up to %d.\n",
		 nchanged,ng->min_unicount);
  }

  /* Count zeroton information for unigrams */

  ng->freq_of_freq[0][0] = 0;
  
  for (i=ng->first_id;i<=ng->vocab_size;i++) {
    if (return_count(ng->four_byte_counts,
		     ng->count_table[0],
		     ng->count[0],
		     ng->count4[0],
		     i) == 0) {
      ng->freq_of_freq[0][0]++;
    }
  }  

  if (ng->discounting_method == GOOD_TURING) {
    for (i=0;i<=ng->n-1;i++) 
      for (j=1;j<=ng->fof_size[i];j++) 
	pc_message(verbosity,3,"fof[%d][%d] = %d\n",i,j,ng->freq_of_freq[i][j]);
  }

  pc_message(verbosity,2,"Calculating discounted counts.\n");

  NG_DISC_METH(ng)->compute_discount_aux(ng, verbosity);
     
  /* Smooth unigram distribution, to give some mass to zerotons */     
  compute_unigram(ng,verbosity);

  /* Increment contexts if using Good-Turing discounting. No need otherwise,
     since all values are discounted anyway. */

  if (ng->discounting_method == GOOD_TURING) {
    pc_message(verbosity,2,"Incrementing contexts...\n");  

    for (i=ng->n-1;i>=1;i--) 
      increment_context(ng,i,verbosity);      
  }

  /* Calculate back-off weights */

  pc_message(verbosity,2,"Calculating back-off weights...\n");

  for (i=1;i<=ng->n-1;i++) 
    compute_back_off(ng,i,verbosity);

  if (!ng->four_byte_alphas) 
    pc_message(verbosity,3,"Number of out of range alphas = %d\n",
	       ng->size_of_alpha_array);

  /* Write out LM */

  pc_message(verbosity,2,"Writing out language model...\n");

  if (ng->write_arpa)
    write_arpa_lm(ng,verbosity);

  if (ng->write_bin) 
    write_bin_lm(ng,verbosity);

  pc_message(verbosity,0,"idngram2lm : Done.\n");

  return 0;    
}
Code example #17
File: ReedMullerCoder.cpp Project: askmyhat/coder
ReedMullerSystematicCoder::ReedMullerSystematicCoder(int order, int length_power) :
  ReedMullerCoder(order, length_power),
  signal_permutation_(std::valarray<bool>(signal_length_ * signal_length_)),
  signal_inverse_permutation_(std::valarray<bool>(signal_length_ * signal_length_)),
  code_permutation_(std::valarray<bool>(code_length_ * code_length_)),
  code_inverse_permutation_(std::valarray<bool>(code_length_ * code_length_))
{
  // Find the new column order of the generator matrix that makes it triangular.
  // The value -1 means the element has not been processed yet.
  std::vector<int> new_index(code_length_, -1);
  std::vector<int> old_index(code_length_, -1);
  for (int new_column = 0; new_column < signal_length_; ++new_column) {
    for (int old_column = 0; old_column < code_length_; ++old_column) {
      if (Weight(generator_[std::slice(new_column * code_length_ + old_column,
            signal_length_ - new_column, code_length_)]) == 1 &&
          Weight(generator_[std::slice((new_column + 1) * code_length_ + old_column,
            signal_length_ - new_column - 1, code_length_)]) == 0) {
        new_index[new_column] = old_column;
        old_index[old_column] = new_column;
        break;
      }
    }
  }

  // Permutation of columns from generator matrix
  for (int new_column = signal_length_; new_column < code_length_; ++new_column) {
    for (int old_column = 0; old_column < code_length_; ++old_column) {
      if (old_index[old_column] == -1) {
        new_index[new_column] = old_column;
        old_index[old_column] = new_column;
        break;
      }
    }
  }

  for (int row = 0; row < code_length_; ++row) {
    code_permutation_[row * code_length_ + old_index[row]] = 1;
    code_inverse_permutation_[row * code_length_ + new_index[row]] = 1;
  }

  for (int column = 0; column < signal_length_; ++column) {
    signal_inverse_permutation_[std::slice(column, signal_length_, signal_length_)] = generator_[std::slice(new_index[column], signal_length_, code_length_)];
  }

  // Fill signal_permutation_ by rows, since the source matrix has triangular form.
  // The product of signal_permutation_ and the square submatrix of the generator
  // matrix with the new column order must be the identity matrix.
  std::valarray<bool> permutation_row(signal_length_);
  std::valarray<bool> generator_column(signal_length_);
  int element_value = 0;
  for (int row = 0; row < signal_length_; ++row) {
    for (int column = 0; column < signal_length_; ++column) {
      permutation_row = signal_permutation_[std::slice(row * signal_length_, signal_length_, 1)];
      generator_column = generator_[std::slice(new_index[column], signal_length_, code_length_)];

      // Product of permutation row and generator column must be 1 on the diagonal and 0 elsewhere.
      element_value = (Weight(permutation_row * generator_column) % 2) != (row == column);
      signal_permutation_[row * signal_length_ + column] = element_value;
    }
  }
}
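The constructor fills code_permutation_ and code_inverse_permutation_ with exactly one 1 per row, taken from the old_index / new_index vectors. The sketch below isolates that mapping from an index vector to a row-major 0/1 matrix, using the same flat valarray storage as the class; it is an illustration, not part of the coder.

// Hedged sketch: build a row-major permutation matrix from an index vector.
#include <cstddef>
#include <valarray>
#include <vector>

std::valarray<bool> permutation_matrix(const std::vector<std::size_t>& target_column) {
    const std::size_t n = target_column.size();
    std::valarray<bool> p(false, n * n);              // all zeros initially
    for (std::size_t row = 0; row < n; ++row)
        p[row * n + target_column[row]] = true;       // exactly one 1 per row
    return p;
}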
Code example #18
File: spmchash.cpp Project: 0x1997/concurrentqueue
 explicit SPMCSequentialHashMap(std::size_t initialSize)
     : nextCapacity(initialSize), index(nullptr)
 {
     new_index();
 }
Code example #19
File: lm_combine.c Project: QuinnEbert/zedom8or
void combine_lm(arpa_lm_t *arpa_lm, arpa_lm_t *lm1, arpa_lm_t *lm2)
{
	char *in_line;
	char *input_line;
	int i,j,k;
	int num_of_args;
	int pos_of_novelty;
	char *input_line_ptr_orig;
	char *word_copy;
	id__t *previous_ngram;
	id__t *current_ngram;
	vocab_sz_t temp_id;
	vocab_sz_t *pos_in_list;
	int previd;
	TBROWSE_UNION bru;
	char** words;
	
	words=(char**)NewArray(15,MAX_WORD,sizeof(char));
	
	in_line = (char *) rr_malloc(1024*sizeof(char));
	input_line = (char *) rr_malloc(1024*sizeof(char));
#import "OpenEarsStaticAnalysisToggle.h"
#ifdef STATICANALYZEDEPENDENCIES
#define __clang_analyzer__ 1
#endif
#if !defined(__clang_analyzer__) || defined(STATICANALYZEDEPENDENCIES)
#undef __clang_analyzer__	
	input_line_ptr_orig = input_line;
#endif	
	
	/* Read number of each k-gram */
	
	arpa_lm->table_sizes = (table_size_t *) rr_malloc(sizeof(table_size_t)*11);
    
	arpa_lm->num_kgrams = (ngram_sz_t *) rr_malloc(sizeof(ngram_sz_t)*11);
		
	calc_merged_ngram_num(arpa_lm, lm1, lm2);
	
	previous_ngram = (id__t *) rr_calloc(arpa_lm->n,sizeof(id__t));
	current_ngram = (id__t *) rr_calloc(arpa_lm->n,sizeof(id__t));

	pos_in_list = (vocab_sz_t *) rr_malloc(sizeof(vocab_sz_t) * arpa_lm->n);
	ng_arpa_lm_alloc_struct(arpa_lm);
	
	/* Process 1-grams */
	
	printf("Reading unigrams...\n");
	
	i=0;
	
	begin_browse_union(lm1,lm2,1,&bru);
	
	while (get_next_ngram_union(words,&bru)) {
	  word_copy = rr_salloc(words[0]);
	  /* Do checks about open or closed vocab */
	  check_open_close_vocab(arpa_lm,word_copy,&i);
	}
	
	/* Process 2, ... , n-1 grams */
#import "OpenEarsStaticAnalysisToggle.h"
#ifdef STATICANALYZEDEPENDENCIES
#define __clang_analyzer__ 1
#endif
#if !defined(__clang_analyzer__) || defined(STATICANALYZEDEPENDENCIES)
#undef __clang_analyzer__	
	previd = -1;
	
	for (i=2;i<=arpa_lm->n-1;i++) {
		
		printf("\nReading %d-grams...\n",i);
		
		previd = -1;
		
		j=0;
		
		for (k=0;k<=arpa_lm->n-1;k++) {
			pos_in_list[k] = 0;
		}
		
		begin_browse_union(lm1,lm2,i,&bru);
		while (get_next_ngram_union(words,&bru)) {
			
			/* Process line into all relevant temp_words */			
			num_of_args = 0;						
#endif		
			sih_lookup(arpa_lm->vocab_ht,words[i-1],&temp_id);
			arpa_lm->word_id[i-1][j] = temp_id;
			
			show_dot(j);
			
			j++;
			if (j>arpa_lm->table_sizes[i-1]) {
				quit(-1,"Error - Header information in ARPA format language model is incorrect.\nMore than %d %d-grams needed to be stored.\n",arpa_lm->table_sizes[i-1],i);
			}
			
			/* Make sure that indexes in previous table point to 
			the right thing. */
			
			for (k=0;k<=i-1;k++) {
				previous_ngram[k] = current_ngram[k];
				sih_lookup(arpa_lm->vocab_ht,words[k],&temp_id);
				if (temp_id == 0 && strcmp(words[k],"<UNK>")) {
					quit(-1,"Error - found unknown word in n-gram file : %s\n",
						words[k]);
				}
				current_ngram[k] = temp_id;
			}
			
			/* Find position of novelty */
			
			/*bug fixed, for the first ngram, pos_of novelty should be 0 - Wei Xu*/
			if (j==1) pos_of_novelty=0;
			else {
				pos_of_novelty = i;
				
				for (k=0;k<=i-1;k++) {
					if (current_ngram[k] > previous_ngram[k]) {
						pos_of_novelty = k;
						k = arpa_lm->n;
					}
					else {
						if ((current_ngram[k] > previous_ngram[k]) && (j > 0)) {
							quit(-1,"Error : n-grams are not correctly ordered.\n");
						}
					}
				}
			}
			
			if (pos_of_novelty == i && j != 1)
			  quit(-1,"Error - Repeated %d-gram in ARPA format language model.\n",
			       i);
			
			if (pos_of_novelty != i-1) {
				if (i==2) {
					/* Deal with unigram pointers */
					
					for (k = previd + 1; k <= current_ngram[0]; k++) {
						arpa_lm->ind[0][k] = new_index(j-1,
							arpa_lm->ptr_table[0],
							&(arpa_lm->ptr_table_size[0]),
							k);
					}
					previd = current_ngram[0];
				}else {
					
					for (k=pos_of_novelty;k<=i-2;k++) {
						if (k == 0) {
							pos_in_list[0] = current_ngram[0];
						}
						else {
							pos_in_list[k] = 
								MIN(get_full_index(arpa_lm->ind[k-1][pos_in_list[k-1]],
								arpa_lm->ptr_table[k-1],   
								arpa_lm->ptr_table_size[k-1],   
								pos_in_list[k-1]),pos_in_list[k]);
							while (arpa_lm->word_id[k][pos_in_list[k]] < 
								current_ngram[k]) {
								pos_in_list[k]++;
							}
						}
					}
					for (k = previd + 1; k <= pos_in_list[i-2]; k++) {
						arpa_lm->ind[i-2][k] = 
							new_index(j-1,
							arpa_lm->ptr_table[i-2],
							&(arpa_lm->ptr_table_size[i-2]),
							k);
					}
					previd = pos_in_list[i-2];	    
				}
			}
		}
	
		/* Now need to tidy up pointers for bottom section of unigrams */
	
		for (k = previd + 1; k <= arpa_lm->vocab_size; k++) {
			arpa_lm->ind[0][k] = new_index(arpa_lm->num_kgrams[1],
				arpa_lm->ptr_table[0],
				&(arpa_lm->ptr_table_size[0]),
				k);
		}      
	
	}
  
	printf("\nReading %d-grams...\n",arpa_lm->n);
	
	j = 0;
	previd = 0;
	
	arpa_lm->ind[arpa_lm->n-2][0] = 0;
	
	for (k=0;k<=arpa_lm->n-1;k++) {
		/* bug fixed by Wei Xu : this is a serious bug*/
		pos_in_list[k] = 0;
		//    pos_in_list[0] = 0;
	}
	
	begin_browse_union(lm1,lm2,arpa_lm->n,&bru);
	while (get_next_ngram_union(words,&bru)) {

	  show_dot(j);
	  
		sih_lookup(arpa_lm->vocab_ht,words[arpa_lm->n-1],&temp_id);
		
		arpa_lm->word_id[arpa_lm->n-1][j] = temp_id;
		
		j++;
		
		for (k=0;k<=arpa_lm->n-1;k++) {
			previous_ngram[k] = current_ngram[k];
			sih_lookup(arpa_lm->vocab_ht,words[k],&temp_id);
			if (temp_id == 0 && strcmp(words[k],"<UNK>")) {
				quit(-1,"Error - found unknown word in n-gram file : %s\n",
					words[k]);
			}
			current_ngram[k] = temp_id;
		}
		
		/* Find position of novelty */
		
		/*bug fixed, for the first ngram, pos_of novelty should be 0 - Wei Xu*/
		if (j==1) pos_of_novelty=0;
		else {
			pos_of_novelty = arpa_lm->n+1;
			
			for (k=0;k<=arpa_lm->n-1;k++) {
				if (current_ngram[k] > previous_ngram[k]) {
					pos_of_novelty = k;
					k = arpa_lm->n;
				}else {
					if ((current_ngram[k] > previous_ngram[k]) && (j>0)) {
						quit(-1,"Error : n-grams are not correctly ordered.\n");
					}
				}
			}
		}
		
		if ( pos_of_novelty == arpa_lm->n+1 && j != 1 ) {
			quit(-1,"Error : Same %d-gram occurs twice in ARPA format LM.\n",
				arpa_lm->n);
		}
		
		if (pos_of_novelty != arpa_lm->n-1) {
			
			for (k=pos_of_novelty;k<=arpa_lm->n-2;k++) {
				if (k == 0) {
					pos_in_list[0] = current_ngram[0];
				}else {
					pos_in_list[k] = 
						MAX(get_full_index(arpa_lm->ind[k-1][pos_in_list[k-1]],
						arpa_lm->ptr_table[k-1],   
						arpa_lm->ptr_table_size[k-1],   
						pos_in_list[k-1]),pos_in_list[k]);
					while (arpa_lm->word_id[k][pos_in_list[k]] < 
						current_ngram[k]) {
						pos_in_list[k]++;
					}
				}
			}
			for (k = previd + 1; k <= pos_in_list[arpa_lm->n-2]; k++) {
				arpa_lm->ind[arpa_lm->n-2][k] = 
					new_index(j-1,
					arpa_lm->ptr_table[arpa_lm->n-2],
					&(arpa_lm->ptr_table_size[arpa_lm->n-2]),
					k);
			}
			previd = pos_in_list[arpa_lm->n-2];
		}
		
		if (j>arpa_lm->table_sizes[arpa_lm->n-1]) {
			quit(-1,"Error - Header information in ARPA format language model is incorrect.\nMore than %d %d-grams needed to be stored.\n",arpa_lm->table_sizes[arpa_lm->n-1],arpa_lm->n-1);
		}
	}
	
	
	
	/* Tidy up */
	
	
	free(previous_ngram);
	free(current_ngram);
	free(in_line);
	free(input_line);
	DeleteArray(words);
  
}
Code example #20
File: checkout.c Project: kevinwang/troll
int cmd_checkout(int argc, const char *argv[]) {
    new_index((char *)(argv[1]));
    copy_out();
    return 0;
}
Code example #21
void analyze_trace(const char *path)
{
	struct btsnoop *btsnoop_file;
	unsigned long num_packets = 0;
	uint32_t type;

	btsnoop_file = btsnoop_open(path, BTSNOOP_FLAG_PKLG_SUPPORT);
	if (!btsnoop_file)
		return;

	type = btsnoop_get_type(btsnoop_file);

	switch (type) {
	case BTSNOOP_TYPE_HCI:
	case BTSNOOP_TYPE_UART:
	case BTSNOOP_TYPE_MONITOR:
		break;
	default:
		fprintf(stderr, "Unsupported packet format\n");
		goto done;
	}

	dev_list = queue_new();
	if (!dev_list) {
		fprintf(stderr, "Failed to allocate device list\n");
		goto done;
	}

	while (1) {
		unsigned char buf[BTSNOOP_MAX_PACKET_SIZE];
		struct timeval tv;
		uint16_t index, opcode, pktlen;

		if (!btsnoop_read_hci(btsnoop_file, &tv, &index, &opcode,
								buf, &pktlen))
			break;

		switch (opcode) {
		case BTSNOOP_OPCODE_NEW_INDEX:
			new_index(&tv, index, buf, pktlen);
			break;
		case BTSNOOP_OPCODE_DEL_INDEX:
			del_index(&tv, index, buf, pktlen);
			break;
		case BTSNOOP_OPCODE_COMMAND_PKT:
			command_pkt(&tv, index, buf, pktlen);
			break;
		case BTSNOOP_OPCODE_EVENT_PKT:
			event_pkt(&tv, index, buf, pktlen);
			break;
		case BTSNOOP_OPCODE_ACL_TX_PKT:
		case BTSNOOP_OPCODE_ACL_RX_PKT:
			acl_pkt(&tv, index, buf, pktlen);
			break;
		case BTSNOOP_OPCODE_SCO_TX_PKT:
		case BTSNOOP_OPCODE_SCO_RX_PKT:
			sco_pkt(&tv, index, buf, pktlen);
			break;
		case BTSNOOP_OPCODE_OPEN_INDEX:
		case BTSNOOP_OPCODE_CLOSE_INDEX:
			break;
		default:
			fprintf(stderr, "Wrong opcode %u\n", opcode);
			goto done;
		}

		num_packets++;
	}

	printf("Trace contains %lu packets\n\n", num_packets);

	queue_destroy(dev_list, dev_destroy);

done:
	btsnoop_unref(btsnoop_file);
}
Code example #22
File: inference.cpp Project: pswzyu/kinect_pose
CliqueTree Inference::CreateCliqueTreeHMMFast( vector<Factor> factors )
{

    int maxVar = 0;
	// get the max var id
	for ( vector<Factor>::iterator iter = factors.begin();
		iter != factors.end(); iter ++ )
	{
		double max_now = iter->var.max();
		if ( max_now > maxVar )
		{
			maxVar = max_now;
		}
	}
    int numNodes = maxVar - 1;
    int card = factors[0].card(0);

    CliqueTree P(numNodes);
    //P.cliqueList = repmat(struct('var', [], 'card', [], 'val', []), numNodes, 1);
    //P.edges = zeros(numNodes);

	for ( int i = 0; i != numNodes; i++ )
	{
        P.cliqueList[i].var << i+1 << i+2;
        P.cliqueList[i].card << card << card;
        P.cliqueList[i].val = ones<vec>(card * card);
	    
        if (i > 0)
	    {
	        P.edges(i, i-1) = 1;
	        P.edges(i-1, i) = 1;
        }
	}

    // the name of the variable starts from 1 !!!!
    for ( int i = 0; i != factors.size(); i ++ )
	{
        Factor f = factors[i];
        int cliqueIdx = 0;
        if (f.var.n_rows == 1)
	    {
            if (f.var(0) > 1)
	        {
                cliqueIdx = f.var(0) - 1;
	        }else{
                cliqueIdx = 1;
	        }
	        
            vec updateIdxs;
            mat assignments = zeros<mat>(card, 2);
            vec cards;
            cards << card << card;
	        for ( int assignment = 0; assignment != card; assignment ++ )
	        {
               if (f.var(0) == cliqueIdx)
	           {
                   assignments.col(0) = linspace<vec>(0, card - 1, card);
                   assignments.col(1) = assignment*ones<vec>(card);
                   updateIdxs = Utils::AssignmentToIndex(assignments, cards);
	           }else{
                   assignments.col(1) = linspace<vec>(0, card - 1, card);
                   assignments.col(0) = assignment*ones<vec>(card);
                   updateIdxs = Utils::AssignmentToIndex(assignments, cards);
               }
               for (int step = 0; step != updateIdxs.n_rows; step ++)
               {
                   P.cliqueList[cliqueIdx - 1].val(updateIdxs(step)) += f.val(step);
               }
	        }        
	    }else{
            if ( f.var.n_rows != 2 )
            {
                std::cout << "ERROR: var more than 2!" << std::endl;
            }
            cliqueIdx = min(f.var);
            if (f.var(0) > f.var(1))
	        {
	            // % sort the factor val so it's in increasing var order
                uvec order = sort_index(f.var); //%#ok
                mat oldAssignments = Utils::IndexToAssignment(linspace<vec>(0,f.val.n_rows-1,f.val.n_rows), f.card);
                mat newAssignments = oldAssignments;
                // (:, order)
                for (int step = 0; step != oldAssignments.n_cols; step ++)
                {
                    newAssignments.col(step) = oldAssignments.col(order(step));
                }
                vec new_card = f.card;
                for (int step = 0; step != f.card.n_rows; step ++)
                {
                    new_card(step) = f.card(order(step));
                }
                f.card = new_card;
                f.var = sort(f.var);
                vec new_index = Utils::AssignmentToIndex(newAssignments, f.card);
                vec new_val = f.val;
                for ( int step = 0; step != f.val.n_rows; step ++ )
                {
                    new_val(step) = f.val( new_index(step) );
                }
                f.val = new_val;
            }

            P.cliqueList[cliqueIdx - 1].val += f.val;
	    }
	}
    return P;
}