Example #1
0
/* let u refer to the more recent time and d to the older time */
int
changet_RY1 (int ci, int timeperiod)    // after Rannala and Yang (2003)  - rubberband method
{
  double metropolishastingsterm, newt, oldt;
  double pdgnew[MAXLOCI + MAXLINKED], pdgnewsum, pdgoldsum, probgnewsum,
    temppdg;
  double t_u_hterm, t_d_hterm, tpw;
  int li, i, j, ecd, ecu, emd, emu, ai, ui;
  double U;
  struct genealogy *G;
  struct edge *gtree;
  double t_d, t_u, t_u_prior, t_d_prior;
  double holdt[MAXPERIODS];


  if (assignmentoptions[POPULATIONASSIGNMENTCHECKPOINT] == 1)
  {
    assertgenealogy (ci);
  }

  t_u = (timeperiod == 0) ? 0 : C[ci]->tvals[timeperiod - 1];
  t_d = (timeperiod == (lastperiodnumber - 1)) ? TIMEMAX : C[ci]->tvals[timeperiod + 1];
  t_d_prior = DMIN (T[timeperiod].pr.max, t_d);
  t_u_prior = DMAX (T[timeperiod].pr.min, t_u);
  oldt = C[ci]->tvals[timeperiod];
  newt = getnewt (timeperiod, t_u_prior, t_d_prior, oldt, 1);
  
  t_u_hterm = (newt - t_u) / (oldt - t_u);
  if (timeperiod == lastperiodnumber - 1)
  {
    t_d_hterm = 1;
  }
  else
  {
    t_d_hterm = (t_d - newt) / (t_d - oldt);
  }

  copy_treeinfo (&holdallgweight_t_RY, &C[ci]->allgweight);  // try turning this off and forcing all recalculations
  copy_probcalc (&holdallpcalc_t_RY, &C[ci]->allpcalc);
  for (i = 0; i < lastperiodnumber; i++)
    holdt[i] = C[ci]->tvals[i];


  pdgoldsum = C[ci]->allpcalc.pdg;
  setzero_genealogy_weights (&C[ci]->allgweight);
  ecd = ecu = emd = emu = 0;
  pdgnewsum = 0;
  probgnewsum = 0;
  storegenealogystats_all_loci (ci, 0);
  C[ci]->tvals[timeperiod] = newt;
  for (i = 0; i < nurates; i++)
    pdgnew[i] = 0;
  for (li = 0; li < nloci; li++)
  {
    G = &(C[ci]->G[li]);
    gtree = G->gtree;
    copy_treeinfo (&holdgweight_t_RY[li], &G->gweight);
    for (i = 0; i < L[li].numlines; i++)
    {
      if (gtree[i].down != -1)
      {
        if (gtree[i].time <= oldt && gtree[i].time > t_u)
        {
          //assert (skipflag[li][i] == 0);turn off 9/19/10
          gtree[i].time =
            beforesplit (timeperiod, oldt, newt, /* t_d, */ t_u, gtree[i].time);
          assert (gtree[i].time != newt);
          ecu++;
        }
        else
        {
          if (gtree[i].time > oldt && gtree[i].time < t_d)
          {
           // assert (skipflag[li][i] == 0); turn off 9/19/10
            gtree[i].time =
              aftersplit (timeperiod, oldt, newt, t_d, /* t_u, */ gtree[i].time);
            assert (gtree[i].time != newt);
            ecd++;
          }
          //else  do not change the time
        }
        j = 0;
        while (gtree[i].mig[j].mt > -0.5)
        {
          assert (gtree[i].mig[j].mt < C[0]->tvals[lastperiodnumber]);
          if (gtree[i].mig[j].mt <= oldt && gtree[i].mig[j].mt > t_u)
          {
            gtree[i].mig[j].mt =
              beforesplit (timeperiod, oldt, newt, /* t_d, */ t_u,
                           gtree[i].mig[j].mt);
            emu++;
          }
          else
          {
            assert (oldt < C[0]->tvals[lastperiodnumber]);
            if (gtree[i].mig[j].mt > oldt && gtree[i].mig[j].mt < t_d)
            {
              gtree[i].mig[j].mt =
                aftersplit (timeperiod, oldt, newt, t_d, /* t_u, */
                            gtree[i].mig[j].mt);
              emd++;
            }
            // else no need to change the time
          }
          j++;
        }
      }
    }
    if (G->roottime <= oldt && G->roottime > t_u
        /* && skipflag[li][G->root] == 0 turn off 9/19/10*/)
      G->roottime =
        beforesplit (timeperiod, oldt, newt, /* t_d, */ t_u, G->roottime);
    else if (G->roottime > oldt && G->roottime < t_d
            /* && skipflag[li][G->root] == 0 turn off 9/19/10*/)
      G->roottime =
        aftersplit (timeperiod, oldt, newt, t_d, /* t_u, */ G->roottime);
    setzero_genealogy_weights (&G->gweight);
        
    treeweight (ci, li);

    sum_treeinfo (&C[ci]->allgweight, &G->gweight);
    ai = 0;
    ui = L[li].uii[ai];

    assert (pdgnew[ui] == 0);
    switch (L[li].model)
    {
    case HKY:
      if (assignmentoptions[JCMODEL] == 1)
      {
        temppdg = pdgnew[ui] =
          likelihoodJC (ci, li, G->uvals[0]);
      }
      else
      {
        temppdg = pdgnew[ui] =
          likelihoodHKY (ci, li, G->uvals[0], G->kappaval, -1, -1, -1, -1);
      }
      break;
    case INFINITESITES:
      temppdg = pdgnew[ui] = likelihoodIS (ci, li, G->uvals[0]);
      break;
    case STEPWISE:
      temppdg = 0;
      for (; ai < L[li].nlinked; ai++)
      {
        ui = L[li].uii[ai];
        assert (pdgnew[ui] == 0);
        pdgnew[ui] = likelihoodSW (ci, li, ai, G->uvals[ai], 1.0);
        temppdg += pdgnew[ui];
      }
      break;
    case JOINT_IS_SW:
      temppdg = pdgnew[ui] = likelihoodIS (ci, li, G->uvals[0]);
      for (ai = 1; ai < L[li].nlinked; ai++)
      {
        ui = L[li].uii[ai];
        assert (pdgnew[ui] == 0);
        pdgnew[ui] = likelihoodSW (ci, li, ai, G->uvals[ai], 1.0);
        temppdg += pdgnew[ui];
      }
      break;
    }
    pdgnewsum += temppdg;
  }

  assert (!ODD (ecd));
  assert (!ODD (ecu));
  ecd /= 2;
  ecu /= 2;
  integrate_tree_prob (ci, &C[ci]->allgweight, &holdallgweight_t_RY,
                       &C[ci]->allpcalc, &holdallpcalc_t_RY, &holdt[0]);   // try enforcing full calculation
  tpw = gbeta * (pdgnewsum - pdgoldsum);
/* 5/19/2011 JH adding thermodynamic integration */
  if (calcoptions[CALCMARGINALLIKELIHOOD])
  {
    metropolishastingsterm = beta[ci] * tpw + (C[ci]->allpcalc.probg - holdallpcalc_t_RY.probg) + (ecd + emd) * log (t_d_hterm) + (ecu + emu) * log (t_u_hterm);
  }
  else
  {
    tpw += C[ci]->allpcalc.probg - holdallpcalc_t_RY.probg;
    metropolishastingsterm = beta[ci] * tpw + (ecd + emd) * log (t_d_hterm) + (ecu + emu) * log (t_u_hterm);
  }
  //assert(metropolishastingsterm >= -1e200 && metropolishastingsterm < 1e200);
  U = log (uniform ());
  if (U < DMIN(1.0, metropolishastingsterm))  //9/13/2010 
  //if (metropolishastingsterm >= 0.0 || metropolishastingsterm > U)
  {
    for (li = 0; li < nloci; li++)
    {
      C[ci]->G[li].pdg = 0;
      for (ai = 0; ai < L[li].nlinked; ai++)
      {
        C[ci]->G[li].pdg_a[ai] = pdgnew[L[li].uii[ai]];
        C[ci]->G[li].pdg += C[ci]->G[li].pdg_a[ai];
      }
      if (L[li].model == HKY)
      {
        storescalefactors (ci, li);
        copyfraclike (ci, li);
      }
    }
    C[ci]->allpcalc.pdg = pdgnewsum;
    C[ci]->poptree[C[ci]->droppops[timeperiod + 1][0]].time =
      C[ci]->poptree[C[ci]->droppops[timeperiod + 1][1]].time = newt;

    if (assignmentoptions[POPULATIONASSIGNMENTCHECKPOINT] == 1)
    {
      assertgenealogy (ci);
    }
    return 1;
  }
  else
  {
    copy_treeinfo (&C[ci]->allgweight, &holdallgweight_t_RY);
    copy_probcalc (&C[ci]->allpcalc, &holdallpcalc_t_RY);
    assert (pdgoldsum == C[ci]->allpcalc.pdg);
    C[ci]->tvals[timeperiod] = oldt;
    for (li = 0; li < nloci; li++)
    {
      G = &(C[ci]->G[li]);
      gtree = G->gtree;
      storegenealogystats_all_loci (ci, 1);
      copy_treeinfo (&G->gweight, &holdgweight_t_RY[li]);
      for (i = 0; i < L[li].numlines; i++)
      {
        if (gtree[i].down != -1)
        {
          if (gtree[i].time <= newt && gtree[i].time > t_u)
          {
           // assert (skipflag[li][i] == 0); turned off 9/19/10
            gtree[i].time =
              beforesplit (timeperiod, newt, oldt, /* t_d, */ t_u, gtree[i].time);
            //cecu++;
          }

          else
          {
            if (gtree[i].time > newt && gtree[i].time < t_d)
            {
             //assert (skipflag[li][i] == 0); turned off 9/19/10
              gtree[i].time =
                aftersplit (timeperiod, newt, oldt, t_d, /* t_u, */ gtree[i].time);
              //cecl++;
            }
          }
          j = 0;
          while (gtree[i].mig[j].mt > -0.5)
          {
            if (gtree[i].mig[j].mt <= newt && gtree[i].mig[j].mt > t_u)
            {
              gtree[i].mig[j].mt =
                beforesplit (timeperiod, newt, oldt, /* t_d, */
                             t_u, gtree[i].mig[j].mt);
              //cemu++;
            }
            else if (gtree[i].mig[j].mt > newt && gtree[i].mig[j].mt < t_d)
            {
              gtree[i].mig[j].mt =
                aftersplit (timeperiod, newt, oldt, t_d, /* t_u, */
                            gtree[i].mig[j].mt);
              //ceml++;
            }
            j++;
          }
        }
      }
//        assert(fabs(C[ci]->G[li].gtree[  C[ci]->G[li].gtree[C[ci]->G[li].root].up[0]].time - C[ci]->G[li].roottime) < 1e-8);    
    }
    /*    assert(ecu==cecu/2);
       assert(ecd==cecl/2);
       assert(emu==cemu);
       assert(emd==ceml); */
    for (li = 0; li < nloci; li++)
    {
      if (L[li].model == HKY)
        restorescalefactors (ci, li);
      /* have to reset the dlikeA values in the genealogies for stepwise model */
      if (L[li].model == STEPWISE)
        for (ai = 0; ai < L[li].nlinked; ai++)
          likelihoodSW (ci, li, ai, C[ci]->G[li].uvals[ai], 1.0);
      if (L[li].model == JOINT_IS_SW)
        for (ai = 1; ai < L[li].nlinked; ai++)
          likelihoodSW (ci, li, ai, C[ci]->G[li].uvals[ai], 1.0);
      // assert(fabs(C[ci]->G[li].gtree[  C[ci]->G[li].gtree[C[ci]->G[li].root].up[0]].time - C[ci]->G[li].roottime) < 1e-8);    
    }
    if (assignmentoptions[POPULATIONASSIGNMENTCHECKPOINT] == 1)
    {
      assertgenealogy (ci);
    }
    return 0;
  }
}                               /* changet_RY1 */
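The helpers beforesplit and aftersplit are not shown in this snippet. Judging from the Hastings terms computed above, (newt - t_u) / (oldt - t_u) and (t_d - newt) / (t_d - oldt), they presumably apply the Rannala-Yang rubberband rescaling: event times lying between the neighboring split times are moved linearly so that the boundary at oldt lands on newt. A minimal sketch under that assumption, with the names suffixed _sketch to mark them as hypothetical:

/* Hypothetical sketch: map a time t in (t_u, oldt] linearly onto (t_u, newt];
   its derivative with respect to t is t_u_hterm, the Jacobian counted once
   per rescaled event above. */
static double
beforesplit_sketch (int timeperiod, double oldt, double newt, double t_u, double t)
{
  (void) timeperiod;            /* kept only to mirror the call sites above */
  return t_u + (t - t_u) * (newt - t_u) / (oldt - t_u);
}

/* Hypothetical sketch: map a time t in (oldt, t_d) linearly onto (newt, t_d);
   its derivative with respect to t is t_d_hterm. */
static double
aftersplit_sketch (int timeperiod, double oldt, double newt, double t_d, double t)
{
  (void) timeperiod;
  return t_d - (t_d - t) * (t_d - newt) / (t_d - oldt);
}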
	const ShaderProgram& uniform(const char * name, double v0) const { return uniform(name, (float)v0); }
	const ShaderProgram& uniform(const char * name, Color v) const {
	  return uniform(name, v[0], v[1], v[2], v[3]);
	}
Example #4
0
void Shader::uniformF(const std::string &name, float f1, float f2)
{
    glf->glUniform2f(uniform(name), f1, f2);
}
Example #5
0
void Shader::uniformF(const std::string &name, const Vec4f &v)
{
    glf->glUniform4f(uniform(name), v.x(), v.y(), v.z(), v.w());
}
Example #6
0
void Shader::uniformI(const std::string &name, int i)
{
    glf->glUniform1i(uniform(name), i);
}
Example #7
0
void Shader::uniformI(const std::string &name, int i1, int i2, int i3, int i4)
{
    glf->glUniform4i(uniform(name), i1, i2, i3, i4);
}
Example #8
0
double RNG::rand()
{
	return uniform(twister);
}
Example #9
0
void CglObject::disableFog(int ID) {
    GLuint FOGID    = glGetUniformLocation(ID, "FOG");
    uniform(FOGID, 0.0f);
}
Example #10
0
    /**
     * Runs the Inverover algorithm for the number of generations specified in the constructor.
     *
     * @param[in,out] pop input/output pagmo::population to be evolved.
     */
    void inverover::evolve(population &pop) const
    {

	const problem::tsp* prob;
	//check if problem is of type pagmo::problem::tsp
	try
	{
		const problem::tsp& tsp_prob = dynamic_cast<const problem::tsp &>(pop.problem());
		prob = &tsp_prob;
	}
	catch (const std::bad_cast& e)
	{
		pagmo_throw(value_error,"Problem not of type pagmo::problem::tsp");
	}
	
	// Let's store some useful variables.

	const population::size_type NP = pop.size();
	const std::vector<std::vector<double> >& weights = prob->get_weights();
	const problem::base::size_type Nv = prob->get_n_cities();

	// Get out if there is nothing to do.
	if (m_gen == 0) {
		return;
	}
	
	// Initializing the random number generators
	boost::uniform_real<double> uniform(0.0,1.0);
	boost::variate_generator<boost::lagged_fibonacci607 &, boost::uniform_real<double> > unif_01(m_drng,uniform);
	boost::uniform_int<int> NPless1(0, NP - 2);
	boost::variate_generator<boost::mt19937 &, boost::uniform_int<int> > unif_NPless1(m_urng,NPless1);
	boost::uniform_int<int> Nv_(0, Nv - 1);
	boost::variate_generator<boost::mt19937 &, boost::uniform_int<int> > unif_Nv(m_urng,Nv_);
	boost::uniform_int<int> Nvless1(0, Nv - 2);
	boost::variate_generator<boost::mt19937 &, boost::uniform_int<int> > unif_Nvless1(m_urng,Nvless1);


	//check if we have a symmetric problem (symmetric weight matrix)
	bool is_sym = true;
	for(size_t i = 0; i < Nv; i++)
	{
		for(size_t j = i+1; j < Nv; j++)
		{
			if(weights[i][j] != weights[j][i])
			{
				is_sym = false;
				goto end_loop;
			}
		}
	}
	end_loop:	
	
	//create own local population
	std::vector<decision_vector> my_pop(NP, decision_vector(Nv));

	//check if some individuals in the population that is passed as a function input are feasible.
	bool feasible;
	std::vector<int> not_feasible;
	for (size_t i = 0; i < NP; i++) {
		feasible = prob->feasibility_x(pop.get_individual(i).cur_x);
		if(feasible){ //if feasible store it in my_pop
			switch( prob->get_encoding() ) {
			    case problem::tsp::FULL:
			        my_pop[i] = prob->full2cities(pop.get_individual(i).cur_x);
			        break;
			    case problem::tsp::RANDOMKEYS:
			        my_pop[i] = prob->randomkeys2cities(pop.get_individual(i).cur_x);
			        break;
			    case problem::tsp::CITIES:
			        my_pop[i] = pop.get_individual(i).cur_x;
			        break;
			}
		}
		else
		{
			not_feasible.push_back(i);
		}
	}

	//replace the not feasible individuals by feasible ones	
	int i;		
	switch (m_ini_type){
		case 0:
		{
		//random initialization (produces feasible individuals)
			for (size_t ii = 0; ii < not_feasible.size(); ii++) {
				i = not_feasible[ii];
				for (size_t j = 0; j < Nv; j++) {
					my_pop[i][j] = j;
				}
			}
			int tmp;
			size_t rnd_idx;
			for (size_t j = 1; j < Nv-1; j++) {
	        		boost::uniform_int<int> dist_(j, Nv - 1);
					boost::variate_generator<boost::mt19937 &, boost::uniform_int<int> > dist(m_urng,dist_);
					
				for (size_t ii = 0; ii < not_feasible.size(); ii++) {
					i = not_feasible[ii];
					rnd_idx = dist();
					tmp = my_pop[i][j];
					my_pop[i][j] = my_pop[i][rnd_idx];
					my_pop[i][rnd_idx] = tmp;
				}	

			}
			break;
		}
		case 1:
		{
		//initialize with nearest neighbor algorithm
			int nxt_city;
			size_t min_idx;
			std::vector<int> not_visited(Nv);
			for (size_t ii = 0; ii < not_feasible.size(); ii++) {
				i = not_feasible[ii];
				for (size_t j = 0; j < Nv; j++) {
					not_visited[j] = j;
				}
				my_pop[i][0] = unif_Nv();
				std::swap(not_visited[my_pop[i][0]],not_visited[Nv-1]);
				for (size_t j = 1; j < Nv-1; j++) {
					min_idx = 0;
					nxt_city = not_visited[0];
					for (size_t l = 1; l < Nv-j; l++) {
						if(weights[my_pop[i][j-1]][not_visited[l]] < weights[my_pop[i][j-1]][nxt_city]){
							min_idx = l;		
							nxt_city = not_visited[l];}
					}
					my_pop[i][j] = nxt_city;
					std::swap(not_visited[min_idx],not_visited[Nv-j-1]);
				}
				my_pop[i][Nv-1] = not_visited[0];
			}
			break;
		}
		default:
			pagmo_throw(value_error,"Invalid initialization type");
	}

	//compute fitness of individuals (necessary if weight matrix is not symmetric)
	std::vector<double>  fitness(NP, 0);
	if(!is_sym){
		for(size_t i=0; i < NP; i++){
    			fitness[i] = weights[my_pop[i][Nv-1]][my_pop[i][0]];
    			for(size_t k=1; k < Nv; k++){
        			fitness[i] += weights[my_pop[i][k-1]][my_pop[i][k]];
			}
		}
	}	
	
	decision_vector tmp_tour(Nv);
	bool stop;
	size_t rnd_num, i2, pos1_c1, pos1_c2, pos2_c1, pos2_c2; //pos2_c1 denotes the position of city1 in parent2
	double fitness_change, fitness_tmp = 0;

	//InverOver main loop
	for(int iter = 0; iter < m_gen; iter++){
		for(size_t i1 = 0; i1 < NP; i1++){
			fitness_change = 0;
			tmp_tour = my_pop[i1];
			pos1_c1 = unif_Nv();
			stop = false;
			while(!stop){
				if(unif_01() < m_ri){
					rnd_num = unif_Nvless1();
					pos1_c2 = (rnd_num == pos1_c1? Nv-1:rnd_num);
				}
				else{
					i2 = unif_NPless1();
					i2 = (i2 == i1? NP-1:i2);
					pos2_c1 = std::find(my_pop[i2].begin(),my_pop[i2].end(),tmp_tour[pos1_c1])-my_pop[i2].begin();
					pos2_c2 = (pos2_c1 == Nv-1? 0:pos2_c1+1);
					pos1_c2 = std::find(tmp_tour.begin(),tmp_tour.end(),my_pop[i2][pos2_c2])-tmp_tour.begin();
				}
				stop = (abs(pos1_c1-pos1_c2)==1 || abs(pos1_c1-pos1_c2)==Nv-1);
				if(!stop){
					
					if(pos1_c1<pos1_c2){
						for(size_t l=0; l < (double (pos1_c2-pos1_c1-1)/2); l++){
							std::swap(tmp_tour[pos1_c1+1+l],tmp_tour[pos1_c2-l]);}
						if(is_sym){
							fitness_change -= weights[tmp_tour[pos1_c1]][tmp_tour[pos1_c2]] + weights[tmp_tour[pos1_c1+1]][tmp_tour[pos1_c2+1 - (pos1_c2+1 > Nv-1? Nv:0)]];
							fitness_change += weights[tmp_tour[pos1_c1]][tmp_tour[pos1_c1+1]] + weights[tmp_tour[pos1_c2]][tmp_tour[pos1_c2+1 - (pos1_c2+1 > Nv-1? Nv:0)]];
						}
					}
					else{
						//inverts the section from c1 to c2 (see documentation Note3)
						
						for(size_t l=0; l < (double (Nv-(pos1_c1-pos1_c2)-1)/2); l++){
							std::swap(tmp_tour[pos1_c1+1+l - (pos1_c1+1+l>Nv-1? Nv:0)],tmp_tour[pos1_c2-l + (pos1_c2<l? Nv:0)]);}
						if(is_sym){
							fitness_change -= weights[tmp_tour[pos1_c1]][tmp_tour[pos1_c2]] + weights[tmp_tour[pos1_c1+1 - (pos1_c1+1 > Nv-1? Nv:0)]][tmp_tour[pos1_c2+1]];
							fitness_change += weights[tmp_tour[pos1_c1]][tmp_tour[pos1_c1+1 - (pos1_c1+1 > Nv-1? Nv:0)]] + weights[tmp_tour[pos1_c2]][tmp_tour[pos1_c2+1]];
						}
						
					}
					pos1_c1 = pos1_c2; //better performance than original Inver-Over (shorter tour in less time)
				}
			} //end of while loop (looping over a single individual)
			if(!is_sym){ //compute fitness of the temporary tour
    				fitness_tmp = weights[tmp_tour[Nv-1]][tmp_tour[0]];
    				for(size_t k=1; k < Nv; k++){
        				fitness_tmp += weights[tmp_tour[k-1]][tmp_tour[k]];
				}
				fitness_change = fitness_tmp - fitness[i1]; 
			}
	
			if(fitness_change < 0){ //replace individual?
				my_pop[i1] = tmp_tour;
				if(!is_sym){
					fitness[i1] = fitness_tmp;
				}
			}
			
			
		} //end of loop over population
	} //end of loop over generations


	//change representation of tour
    	for (size_t ii = 0; ii < NP; ii++) {
			switch( prob->get_encoding() ) {
			    case problem::tsp::FULL:
			        pop.set_x(ii,prob->cities2full(my_pop[ii]));
			        break;
			    case problem::tsp::RANDOMKEYS:
			        pop.set_x(ii,prob->cities2randomkeys(my_pop[ii],pop.get_individual(ii).cur_x));
			        break;
			    case problem::tsp::CITIES:
			        pop.set_x(ii,my_pop[ii]);
			        break;
			}
		}

    } // end of evolve
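In the pos1_c1 < pos1_c2 branch above, the swap loop reverses the tour segment that starts just after city c1 and ends at city c2; for a symmetric weight matrix only the two removed edges and the two newly created edges change the tour length, which is what fitness_change tracks. A small stand-alone sketch of that segment reversal (plain C, hypothetical helper, ignoring the wrap-around branch):

#include <stdio.h>

/* Reverse tour[lo..hi] in place (inclusive indices). */
static void reverse_segment(int *tour, int lo, int hi)
{
	while (lo < hi) {
		int tmp = tour[lo];
		tour[lo] = tour[hi];
		tour[hi] = tmp;
		lo++;
		hi--;
	}
}

int main(void)
{
	int tour[6] = {0, 1, 2, 3, 4, 5};
	int pos1_c1 = 1, pos1_c2 = 4, k;

	/* Invert the segment just after c1 (index pos1_c1) up to and including
	   c2 (index pos1_c2), mirroring the pos1_c1 < pos1_c2 branch above. */
	reverse_segment(tour, pos1_c1 + 1, pos1_c2);
	for (k = 0; k < 6; k++)
		printf("%d ", tour[k]);   /* prints: 0 1 4 3 2 5 */
	printf("\n");
	return 0;
}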
Example #11
0
bool random_utils::uniformBool(rngState *state)
{
    return uniform(state, 0.0, 1.0) <= 0.5;
}
Example #12
0
void Program::setUniform(const GLchar* name, const glm::mat4& m, GLboolean transpose)
{
	assert(IsInUse());
    glUniformMatrix4fv(uniform(name), 1, transpose, glm::value_ptr(m));
}
Example #13
0
void Program::setUniformMatrix4(const GLchar* name, const GLfloat* v, GLsizei count, GLboolean transpose) {
    assert(IsInUse());
    glUniformMatrix4fv(uniform(name), count, transpose, v);
}
Example #14
0
 void UniformHandler::texRectUniform(const char* _name, GLuint _texId, QSize _size) {
   texUniform(_name,_texId,GL_TEXTURE_RECTANGLE);
   uniform(
     QString(QString(_name) + "_size").toUtf8().data(),
     QVector2D(_size.width(),_size.height()));
 }
Example #15
0
// Generates an exponential random number using a uniform random number
double exponential(float lambda=1)
{
	return -lambda*log(uniform());
}
void PartitionExamples(void **data, int *num_data_ptr, int num_features,
		       uchar **train_members_ptr, int *num_train_ptr,
		       uchar **test_members_ptr, int *num_test_ptr,
		       uchar **prune_members_ptr, int *num_prune_ptr,
		       double train_pct, double prune_pct, double test_pct,
		       SSVINFO *ssvinfo)
{
  uchar *train_members, *test_members, *prune_members;
  uchar *assigned;
  int num_train, num_test, num_prune;
  int example, num_data = *num_data_ptr;
  int i, j, idx, num_not_assigned;

  /* Keep track of the examples that are assigned to one of the sets: train,
     prune and test. */
  assigned = CREATE_BITARRAY(num_data);
  ZERO_BITARRAY(assigned, num_data);
  num_not_assigned = num_data;

  /* --- Training examples. --- */
  num_train = (int) rint(num_data * train_pct);
  train_members = CREATE_BITARRAY(num_data);
  ZERO_BITARRAY(train_members, num_data);
  for (example = 0; (example < num_train) && (num_not_assigned>0); example++) {
    /* Assign one of the unassigned examples. */
    idx = (int) uniform(0.0, (double) num_not_assigned);
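    /* Advance i to the idx-th (zero-based) example that is still unassigned. */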
    for (i = j = 0; j < idx || READ_BITARRAY(assigned, i); i++) 
      if (!READ_BITARRAY(assigned, i))
	j++;
    WRITE_BITARRAY(train_members, i, 1);
    WRITE_BITARRAY(assigned, i, 1);
    num_not_assigned--;
  }
  num_train = example;
  /* --- Test examples. --- */
  num_test = (int) rint(num_data * test_pct);
  test_members = CREATE_BITARRAY(num_data);
  ZERO_BITARRAY(test_members, num_data);
  for (example = 0; (example < num_test) && (num_not_assigned>0); example++) {
    /* Assign one of the unassigned examples. */
    idx = (int) uniform(0.0, (double) num_not_assigned);
    for (i = j = 0; j < idx || READ_BITARRAY(assigned, i); i++)
      if (!READ_BITARRAY(assigned, i))
	j++;
    WRITE_BITARRAY(test_members, i, 1);
    WRITE_BITARRAY(assigned, i, 1);
    num_not_assigned--;
  }
  num_test = example;
  /* --- Pruning examples. --- */
  num_prune = (int) rint(num_data * prune_pct);
  prune_members = CREATE_BITARRAY(num_data);
  ZERO_BITARRAY(prune_members, num_data);
  for (example = 0; (example < num_prune) && (num_not_assigned>0); example++) {
    /* Assign one of the unassigned examples. */
    idx = (int) uniform(0.0, (double) num_not_assigned);
    for (i = j = 0; j < idx || READ_BITARRAY(assigned, i); i++)
      if (!READ_BITARRAY(assigned, i))
	j++;
    WRITE_BITARRAY(prune_members, i, 1);
    WRITE_BITARRAY(assigned, i, 1);
    num_not_assigned--;
  }
  num_prune = example;

  free(assigned);

  *train_members_ptr = train_members; *num_train_ptr = num_train;
  *test_members_ptr = test_members; *num_test_ptr = num_test;
  *prune_members_ptr = prune_members; *num_prune_ptr = num_prune;
  
}
Example #17
0
// Generates a rayleigh random number using a uniform random number
double rayleigh(float sigma=1)
{
	return sigma*sqrt(-1*log(uniform()));
}
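Examples #15 and #17 are both inverse-transform samplers: with U uniform on (0, 1], -lambda*log(U) is exponentially distributed with mean lambda (lambda acts as a scale here, not a rate), and sigma*sqrt(-log(U)) is Rayleigh-distributed (its square is exponential). A minimal, self-contained check of the exponential case; the uniform() below is a stand-in built on rand(), since the snippets' own generator is not shown:

#include <math.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for the uniform() used above: returns a value in (0, 1]. */
static double uniform (void)
{
	return (rand () + 1.0) / ((double) RAND_MAX + 1.0);
}

int main (void)
{
	double sum = 0.0;
	int i, n = 100000;

	srand (1);
	for (i = 0; i < n; i++)
		sum += -2.0 * log (uniform ());   /* exponential with mean 2.0 */
	printf ("sample mean %.3f (expect about 2.0)\n", sum / n);
	return 0;
}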
Example #18
0
  void RecurrentNeuralNetworkPartOfSpeechTagger<F>::Train(
      const std::vector<TaggedSentence> &tagged_sentences,
      F learning_rate,
      F momentum,
      F lambda_1,
      F lambda_2,
      int iterations,
      Evaluator<F> &evaluator,
      const std::vector<TaggedSentence> &validation_sentences,
      const std::unordered_set<std::string> &training_vocabulary) {

    // InitializeBlob(uniform_symmetric, generator, &recurrent_state_input);
    InitializeBlob(uniform_symmetric, generator, &classify_weights);
    InitializeBlob(uniform_symmetric, generator, &combine_weights);

    std::uniform_int_distribution<int> uniform(0, tagged_sentences.size() - 1);

    constexpr auto minibatch_size = 100;

    std::cout << "Training... " << std::endl << std::endl;
    for (auto i = 0; i < iterations; ++i) {
      std::cout << "Evaluating on validation data... ";
      std::cout.flush();
      auto validation_report = evaluator.Evaluate(
          *this, validation_sentences, training_vocabulary);
      std::cout << "Done." << std::endl;
      std::cout << validation_report << std::endl<< std::endl;
      std::cout << "Starting iteration " << i << "... " << std::endl << std::endl;
      
      for (auto j = 0; j < tagged_sentences.size(); ++j) {
        // auto u = uniform(generator);
        auto u = j;
        ForwardBackwardCpu(tagged_sentences.at(u));

        classify_weights.ClipGradient(tagged_sentences.at(u).size());
        classify_bias.ClipGradient(tagged_sentences.at(u).size());
        combine_weights.ClipGradient(tagged_sentences.at(u).size());
        combine_bias.ClipGradient(tagged_sentences.at(u).size());

        classify_weights.L1Regularize(lambda_1);
        classify_bias.L1Regularize(lambda_1);
        combine_weights.L1Regularize(lambda_1);
        combine_bias.L1Regularize(lambda_1);

        auto magnitude = sqrt(classify_weights.values.SquareMagnitude()
            + classify_bias.values.SquareMagnitude()
            + combine_weights.values.SquareMagnitude()
            + combine_bias.values.SquareMagnitude());

        classify_weights.L2Regularize(lambda_2, magnitude);
        classify_bias.L2Regularize(lambda_2, magnitude);
        combine_weights.L2Regularize(lambda_2, magnitude);
        combine_bias.L2Regularize(lambda_2, magnitude);

        // auto difference_magnitude = sqrt(classify_weights.differences.SquareMagnitude()
        //     + classify_weights.differences.SquareMagnitude()
        //     + combine_weights.differences.SquareMagnitude()
        //     + combine_bias.differences.SquareMagnitude());

        // classify_weights.ClipGradient(difference_magnitude);
        // classify_bias.ClipGradient(difference_magnitude);
        // combine_weights.ClipGradient(difference_magnitude);
        // combine_bias.ClipGradient(difference_magnitude);

        // const auto modified_learning_rate = learning_rate * pow(F(0.1), i / 2.0);
        const auto modified_learning_rate = learning_rate;

        classify_weights.UpdateMomentum(modified_learning_rate, momentum);
        classify_bias.UpdateMomentum(modified_learning_rate, momentum);
        combine_weights.UpdateMomentum(modified_learning_rate, momentum);
        combine_bias.UpdateMomentum(modified_learning_rate, momentum);

        constexpr auto kAdaDeltaMemory = F(0.95);

        // classify_weights.UpdateAdaDelta(modified_learning_rate, kAdaDeltaMemory);
        // classify_bias.UpdateAdaDelta(modified_learning_rate, kAdaDeltaMemory);
        // combine_weights.UpdateAdaDelta(modified_learning_rate, kAdaDeltaMemory);
        // combine_bias.UpdateAdaDelta(modified_learning_rate, kAdaDeltaMemory);

        // classify_weights.UpdateAdaGrad(modified_learning_rate);
        // classify_bias.UpdateAdaGrad(modified_learning_rate);
        // combine_weights.UpdateAdaGrad(modified_learning_rate);
        // combine_bias.UpdateAdaGrad(modified_learning_rate);

        classify_weights.differences.Reset();
        classify_bias.differences.Reset();
        combine_weights.differences.Reset();
        combine_bias.differences.Reset();

        // if (j > 0 && j % 100 == 0) {
        //   std::cout << "Finished " << j << " sentences." << std::endl;
        // }

        if (j > 0 && j + 1 < tagged_sentences.size() && j % 1000 == 0) {
          // std::cout << std::endl << "Evaluating on validation data... ";
          // std::cout.flush();
          auto validation_report = evaluator.Evaluate(
              *this, validation_sentences, training_vocabulary);
          std::cout << "Done." << std::endl;
          std::cout << validation_report << std::endl << std::endl;
        }
      }
      std::cout << "Done." << std::endl << std::endl;
    }
    std::cout << "Done." << std::endl << std::endl;
  }
Example #19
0
void Shader::uniformI(const std::string &name, int i1, int i2)
{
    glf->glUniform2i(uniform(name), i1, i2);
}
Example #20
0
 double uniform(double a, double b) {
   assert(b != a);
   return a + (b-a)*uniform();
 }
Example #21
0
void Shader::uniformF(const std::string &name, float f)
{
    glf->glUniform1f(uniform(name), f);
}
Example #22
0
  return x+5;
}


__attribute__((vector(mask,uniform (y), linear(x:1))))
__attribute__((vector (nomask, uniform (x), linear(y:1))))
int func2 (int x, int y)
{
  return x+y;
}

int func4 (int x, int y) __attribute__((vector, vector (nomask), vector (uniform(y), linear(x:1))));


template <class T, class R>
__attribute__((vector, vector(mask,uniform (y), linear(x:1))))
T func3 (T x, R y)
{
  return x+(T)y;
}



int main (void)
{
  if ((func3 (5, 4) + func2 (5, 4) + func (5) + (int) func3<long, int> (5, 4)) != 
      (5 + 4) + (5 + 4) + (5 + 5) + (int) ((long)5 +(int)4))
    __builtin_abort ();
  return 0;
}
Example #23
0
void Shader::uniformF(const std::string &name, float f1, float f2, float f3, float f4)
{
    glf->glUniform4f(uniform(name), f1, f2, f3, f4);
}
Example #24
0
	void setUniform(const GLchar *name, const glm::mat4 &v, GLboolean transpose = false) {
		glUniformMatrix4fv(uniform(name), 1, transpose, glm::value_ptr(v));
	}
Example #25
0
void Shader::uniformMat(const std::string &name, const Mat4f &m, bool transpose)
{
    glf->glUniformMatrix4fv(uniform(name), 1, transpose, m.data());
}
Example #26
0
VehicleObject *GameWorld::createVehicle(const uint16_t id, const glm::vec3& pos, const glm::quat& rot, GameObjectID gid)
{
	auto vti = data->findObjectType<VehicleData>(id);
	if( vti ) {
		logger->info("World", "Creating Vehicle ID " + std::to_string(id) + " (" + vti->gameName + ")");
		
		if(! vti->modelName.empty()) {
			data->loadDFF(vti->modelName + ".dff");
		}
		if(! vti->textureName.empty()) {
			data->loadTXD(vti->textureName + ".txd");
		}
		
		glm::u8vec3 prim(255), sec(128);
		auto palit = data->vehiclePalettes.find(vti->modelName); // modelname is conveniently lowercase (usually)
		if(palit != data->vehiclePalettes.end() && palit->second.size() > 0 ) {
			 std::uniform_int_distribution<int> uniform(0, palit->second.size()-1);
			 int set = uniform(randomEngine);
			 prim = data->vehicleColours[palit->second[set].first];
			 sec = data->vehicleColours[palit->second[set].second];
		}
		else {
			logger->warning("World", "No colour palette for vehicle " + vti->modelName);
		}
		
		auto wi = data->findObjectType<ObjectData>(vti->wheelModelID);
		if( wi )
		{
			if(! wi->textureName.empty()) {
				data->loadTXD(wi->textureName + ".txd");
			}
		}
		
		ModelRef& m = data->models[vti->modelName];
		auto model = m->resource;
		auto info = data->vehicleInfo.find(vti->handlingID);
		if(model && info != data->vehicleInfo.end()) {
			if( info->second->wheels.size() == 0 && info->second->seats.size() == 0 ) {
				for( const ModelFrame* f : model->frames ) {
					const std::string& name = f->getName();
					
					if( name.size() > 5 && name.substr(0, 5) == "wheel" ) {
						auto frameTrans = f->getMatrix();
						info->second->wheels.push_back({glm::vec3(frameTrans[3])});
					}
					if(name.size() > 3 && name.substr(0, 3) == "ped" && name.substr(name.size()-4) == "seat") {
						auto p = f->getDefaultTranslation();
						p.x = p.x * -1.f;
						info->second->seats.push_back({p});
						p.x = p.x * -1.f;
						info->second->seats.push_back({p});
					}
				}
			}
		}

		auto vehicle = new VehicleObject{ this, pos, rot, m, vti, info->second, prim, sec };
		vehicle->setGameObjectID(gid);

		vehiclePool.insert( vehicle );
        allObjects.push_back( vehicle );

		return vehicle;
	}
	return nullptr;
}
	const ShaderProgram& uniform(int location, double v0) const { return uniform(location, (float)v0); }
Example #28
0
static void test_vector(const char *glsl_type, const char * suffix,
		void (GLAPIENTRY *uniform)(GLint, GLsizei, const GLfloat*))
{
	char buffer[2*sizeof(vs_vector_template)];
	GLuint vs, fs;
	GLuint program;
	GLint loc_a, loc_b, loc_c;
	GLint loc_b2;

	snprintf(buffer, sizeof(buffer), vs_vector_template,
		glsl_type, glsl_type, glsl_type, glsl_type);
	vs = compile_shader(GL_VERTEX_SHADER_ARB, buffer);

	snprintf(buffer, sizeof(buffer), fs_vector_template,
		glsl_type, suffix);
	fs = compile_shader(GL_FRAGMENT_SHADER_ARB, buffer);

	program = link_program(vs, fs);

	glUseProgramObjectARB(program);
	loc_a = glGetUniformLocationARB(program, "a");
	loc_b = glGetUniformLocationARB(program, "b");
	loc_c = glGetUniformLocationARB(program, "c");
	loc_b2 = glGetUniformLocationARB(program, "b[2]");
	printf("locations: a: %i b: %i c: %i b[2]: %i\n", loc_a, loc_b, loc_c, loc_b);

	expect_error(GL_NO_ERROR, "Type %s: Sanity check", glsl_type);
	uniform(loc_a, 0, lots_of_zeros);
	expect_error(GL_NO_ERROR, "Type %s: Write count = 0 to a", glsl_type);
	uniform(loc_a, 1, lots_of_zeros);
	expect_error(GL_NO_ERROR, "Type %s: Write count = 1 to a", glsl_type);
	uniform(loc_a, 2, lots_of_zeros);
	expect_error(GL_INVALID_OPERATION, "Type %s: Write count = 2 to a", glsl_type);
	uniform(loc_a, 1024, lots_of_zeros);
	expect_error(GL_INVALID_OPERATION, "Type %s: Write count = 1024 to a", glsl_type);

	uniform(loc_b, 0, lots_of_zeros);
	expect_error(GL_NO_ERROR, "Type %s: Write count = 0 to b", glsl_type);
	uniform(loc_b, 1, lots_of_zeros);
	expect_error(GL_NO_ERROR, "Type %s: Write count = 1 to b", glsl_type);
	uniform(loc_b, 4, lots_of_zeros);
	expect_error(GL_NO_ERROR, "Type %s: Write count = 4 to b", glsl_type);

	/* Note: The following are out of bound for the array,
	 * but the spec situation is a bit unclear as to whether errors
	 * should be generated.
	 *
	 * Issue #32 of the ARB_shader_objects spec suggests errors
	 * should be generated when writing out-of-bounds on arrays,
	 * but this is not reflected in the OpenGL spec.
	 *
	 * The main point of these tests is to make sure the driver
	 * does not introduce memory errors by accessing internal arrays
	 * out of bounds.
	 */
	uniform(loc_b, 5, lots_of_zeros);
	(void) glGetError(); /* Eat generated error, if any */

	uniform(loc_c, 0, lots_of_zeros);
	expect_error(GL_NO_ERROR, "Type %s: Write count = 0 to c", glsl_type);
	uniform(loc_c, 1, lots_of_zeros);
	expect_error(GL_NO_ERROR, "Type %s: Write count = 1 to c", glsl_type);
	uniform(loc_c, 4, lots_of_zeros);
	expect_error(GL_NO_ERROR, "Type %s: Write count = 4 to c", glsl_type);

	/* Out of bounds; see comment above */
	uniform(loc_c, 5, lots_of_zeros);
	(void) glGetError(); /* Eat generated error, if any */

	uniform(loc_b2, 0, lots_of_zeros);
	expect_error(GL_NO_ERROR, "Type %s: Write count = 0 to b[2]", glsl_type);
	uniform(loc_b2, 2, lots_of_zeros);
	expect_error(GL_NO_ERROR, "Type %s: Write count = 2 to b[2]", glsl_type);

	/* Out of bounds; see comment above */
	uniform(loc_b2, 1024, lots_of_zeros);
	(void) glGetError(); /* Eat generated error, if any */

	glDeleteObjectARB(fs);
	glDeleteObjectARB(vs);
	glDeleteObjectARB(program);
}
	const ShaderProgram& uniform(const char * name, const Vec<2,T>& v) const {
		return uniform(name, v.x, v.y);
	}
Example #30
0
double sample_from_interval(const std::pair<double,double>& interval)
{
  return interval.first + uniform()*(interval.second - interval.first);
}