Example no. 1
0
void updateMRF1perfect_onedelta(int g,vector<int> &valueLower,
				vector<int> &valueUpper,
				const vector<double> &potOn,
				const vector<double> &potOff,
				const vector<vector<int> > &neighbour,
				double eta0,double omega0,double kappa,
				Random &ran) {
  double potLower = potOff[g] - potOn[g];
  double potUpper = potOff[g] - potOn[g];

  // add potential for clique centered at gene g

  int n = neighbour[g].size();
  double omega;
  if (n > 0)
    omega = omega0 * ((double) n) / (kappa + ((double) n));
  else
    omega = 0.0;

  double meanLower = 0.0;
  double meanUpper = 0.0;
  int gg;
  for (gg = 0; gg < neighbour[g].size(); gg++) {
    meanLower += (double) valueLower[neighbour[g][gg]];
    meanUpper += (double) valueUpper[neighbour[g][gg]];
  }

  if (neighbour[g].size() > 0) {
    meanLower /= (double) neighbour[g].size();
    meanUpper /= (double) neighbour[g].size();

    meanLower = (1.0 - omega) * eta0 + omega * meanLower;
    meanUpper = (1.0 - omega) * eta0 + omega * meanUpper;
  }
  else {
    meanLower = eta0;
    meanUpper = eta0;
  }

  potUpper += - log(1.0 - meanLower) + log(meanLower);
  potLower += - log(1.0 - meanUpper) + log(meanUpper);

  // add potential for cliques centered at each gene connected to g

  for (gg = 0; gg < neighbour[g].size(); gg++) {
    int gene = neighbour[g][gg];
    int n = neighbour[gene].size();
    double omega;
    if (n > 0)
      omega = omega0 * ((double) n) / (kappa + ((double) n));
    else
      omega = 0.0;

    double meanLower = 0.0;
    double meanUpper = 0.0;
    int ggg;
    for (ggg = 0; ggg < neighbour[gene].size(); ggg++) {
      if (neighbour[gene][ggg] != g) {
	meanLower += (double) valueLower[neighbour[gene][ggg]];
	meanUpper += (double) valueUpper[neighbour[gene][ggg]];
      }
    }

    meanLower /= (double) neighbour[gene].size();
    meanUpper /= (double) neighbour[gene].size();

    meanLower = (1.0 - omega) * eta0 + omega * meanLower;
    meanUpper = (1.0 - omega) * eta0 + omega * meanUpper;

    double extra = omega / ((double) neighbour[gene].size());

    if (valueLower[gene] == 0 && valueUpper[gene] == 0) {
      potUpper += - log(1.0 - meanUpper) + log(1.0 - meanUpper - extra);
      potLower += - log(1.0 - meanLower) + log(1.0 - meanLower - extra);
    }
    else if (valueLower[gene] == 1 && valueUpper[gene] == 1) {
      potUpper += - log(meanUpper) + log(meanUpper + extra);
      potLower += - log(meanLower) + log(meanLower + extra);
    }
    else {
      double p0Upper = - log(1.0 - meanUpper) + log(1.0 - meanUpper - extra);
      double p0Lower = - log(1.0 - meanLower) + log(1.0 - meanLower - extra);

      double p1Upper = - log(meanUpper) + log(meanUpper + extra);
      double p1Lower = - log(meanLower) + log(meanLower + extra);

      if (p0Lower < p1Lower)
	potLower += p1Lower;
      else
	potLower += p0Lower;

      if (p0Upper < p1Upper)
	potUpper += p0Upper;
      else
	potUpper += p1Upper;
    }
  }

  double probLower;
  if (potUpper > 0.0)
    probLower = 1.0 / (1.0 + exp(- potUpper));
  else
    probLower = exp(potUpper) / (1.0 + exp(potUpper));

  double probUpper;
  if (potLower > 0.0)
    probUpper = 1.0 / (1.0 + exp(- potLower));
  else
    probUpper = exp(potLower) / (1.0 + exp(potLower));

  double u = ran.Unif01();
  if (u < probLower)
    valueLower[g] = 1;
  else
    valueLower[g] = 0;

  if (u < probUpper)
    valueUpper[g] = 1;
  else
    valueUpper[g] = 0;

  return;
}
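The two-branch computation of probLower and probUpper above is the standard numerically stable evaluation of the logistic function p = 1/(1 + exp(-pot)); a standalone sketch of the same idea (hypothetical helper, not part of the original source):

#include <cmath>

// Numerically stable logistic: equal to 1.0 / (1.0 + exp(-pot)),
// but never calls exp() with a large positive argument.
static double stableSigmoid(double pot) {
  return pot > 0.0 ? 1.0 / (1.0 + std::exp(-pot))
                   : std::exp(pot) / (1.0 + std::exp(pot));
}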
Example no. 2
0
float Phys_Dmg(double* P_Atk, double* E_Def, float& P_Dmg){
  float var = random.fromFirstToLast(0.9f, 1.1f);
  P_Dmg = (*P_Atk / 1000) + *P_Atk - *E_Def;
  P_Dmg *= var;
  return P_Dmg;
}
Example no. 3
0
// LookForWorkers
//------------------------------------------------------------------------------
void Client::LookForWorkers()
{
	PROFILE_FUNCTION

	MutexHolder mh( m_ServerListMutex );

	const size_t numWorkers( m_ServerList.GetSize() );

	// find out how many connections we have now
	size_t numConnections = 0;
	for ( size_t i=0; i<numWorkers; i++ )
	{
		if ( m_ServerList[ i ].m_Connection )
		{
			numConnections++;
		}
	}

	// limit maximum concurrent connections
	if ( numConnections >= CONNECTION_LIMIT )
	{
		return;
	}

	// if we're connected to every possible worker already
	if ( numConnections == numWorkers )
	{
		return;
	}

	// randomize the start index to better distribute workers when there 
	// are many workers/clients - otherwise all clients will attempt to connect 
	// to the first CONNECTION_LIMIT workers
	Random r;
	size_t startIndex = r.GetRandIndex( (uint32_t)numWorkers );

	// find someone to connect to
	for ( size_t j=0; j<numWorkers; j++ )
	{
		const size_t i( ( j + startIndex ) % numWorkers );

		ServerState & ss = m_ServerList[ i ];
		if ( ss.m_Connection )
		{
			continue;
		}

		// ignore blacklisted workers
		if ( ss.m_Blacklisted )
		{
			continue;
		}

		// lock the server state
		MutexHolder mhSS( ss.m_Mutex );

		ASSERT( ss.m_Jobs.IsEmpty() );

		if ( ss.m_DelayTimer.GetElapsed() < CONNECTION_REATTEMPT_DELAY_TIME )
		{
			continue;
		}

		const ConnectionInfo * ci = Connect( m_WorkerList[ i ], Protocol::PROTOCOL_PORT );
		if ( ci == nullptr )
		{
			ss.m_DelayTimer.Start(); // reset connection attempt delay
		}
		else
		{
			const uint32_t numJobsAvailable( JobQueue::IsValid() ? (uint32_t)JobQueue::Get().GetNumDistributableJobsAvailable() : 0 );

			ci->SetUserData( &ss );
			ss.m_Connection = ci; // success!
			ss.m_NumJobsAvailable = numJobsAvailable;
			ss.m_StatusTimer.Start();

			// send connection msg
			Protocol::MsgConnection msg( numJobsAvailable );
			MutexHolder mh2( ss.m_Mutex );
			msg.Send( ci );
		}

		// limit to one connection attempt per iteration
		return;
	}
}
Example no. 4
0
static Imath::Color3f randomColor( Random &r, int seed )
{
	return r.randomColor( std::max( seed, 0 ) );
}
bool TimeSeriesClassificationData::spiltDataIntoKFolds(const UINT K,const bool useStratifiedSampling){

    crossValidationSetup = false;
    crossValidationIndexs.clear();

    //K can not be zero
    if( K == 0 ){
        errorLog << "spiltDataIntoKFolds(UINT K) - K can not be zero!" << endl;
        return false;
    }

    //K can not be larger than the number of examples
    if( K > totalNumSamples ){
        errorLog << "spiltDataIntoKFolds(UINT K,bool useStratifiedSampling) - K can not be larger than the total number of samples in the dataset!" << endl;
        return false;
    }

    //K can not be larger than the number of examples in a specific class if the stratified sampling option is true
    if( useStratifiedSampling ){
        for(UINT c=0; c<classTracker.size(); c++){
            if( K > classTracker[c].counter ){
                errorLog << "spiltDataIntoKFolds(UINT K,bool useStratifiedSampling) - K can not be larger than the number of samples in any given class!" << endl;
                return false;
            }
        }
    }

    //Setup the dataset for k-fold cross validation
    kFoldValue = K;
    vector< UINT > indexs( totalNumSamples );

    //Work out how many samples are in each fold, the last fold might have more samples than the others
    UINT numSamplesPerFold = (UINT) floor( totalNumSamples/double(K) );

    //Resize the cross validation indexs buffer
    crossValidationIndexs.resize( K );

    //Create the random partition indices
    Random random;
    UINT randomIndex = 0;

    if( useStratifiedSampling ){
        //Break the data into separate classes
        vector< vector< UINT > > classData( getNumClasses() );

        //Add the indexs to their respective classes
        for(UINT i=0; i<totalNumSamples; i++){
            classData[ getClassLabelIndexValue( data[i].getClassLabel() ) ].push_back( i );
        }

        //Randomize the order of the indexs in each of the class index buffers
        for(UINT c=0; c<getNumClasses(); c++){
            UINT numSamples = (UINT)classData[c].size();
            for(UINT x=0; x<numSamples; x++){
                //Pick a random index
                randomIndex = random.getRandomNumberInt(0,numSamples);

                //Swap the indexs
                SWAP( classData[c][ x ] , classData[c][ randomIndex ] );
            }
        }

        //Loop over each of the classes and add the data equally to each of the k folds until there is no data left
        vector< UINT >::iterator iter;
        for(UINT c=0; c<getNumClasses(); c++){
            iter = classData[ c ].begin();
            UINT k = 0;
            while( iter != classData[c].end() ){
                crossValidationIndexs[ k ].push_back( *iter );
                iter++;
                k++;
                k = k % K;
            }
        }

    }else{
        //Randomize the order of the data
        for(UINT i=0; i<totalNumSamples; i++) indexs[i] = i;
        for(UINT x=0; x<totalNumSamples; x++){
            //Pick a random index
            randomIndex = random.getRandomNumberInt(0,totalNumSamples);

            //Swap the indexs
            SWAP( indexs[ x ] , indexs[ randomIndex ] );
        }

        UINT counter = 0;
        UINT foldIndex = 0;
        for(UINT i=0; i<totalNumSamples; i++){
            //Add the index to the current fold
            crossValidationIndexs[ foldIndex ].push_back( indexs[i] );

            //Move to the next fold if ready
            if( ++counter == numSamplesPerFold && foldIndex < K-1 ){
                foldIndex++;
                counter = 0;
            }
        }
    }

    crossValidationSetup = true;
    return true;

}
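The shuffles above swap each position with an index drawn from the full range, which is close to but not exactly uniform over permutations; a minimal sketch of an unbiased Fisher-Yates shuffle, assuming the same getRandomNumberInt(min,max) convention with max exclusive (helper name is hypothetical):

//Hypothetical helper, not part of the original class: unbiased Fisher-Yates shuffle.
//Assumes getRandomNumberInt(min,max) returns an integer in [min,max).
static void fisherYatesShuffle( vector< UINT > &v, Random &random ){
    for(UINT i = (UINT)v.size(); i > 1; i--){
        UINT j = random.getRandomNumberInt(0, i); //j in [0,i)
        SWAP( v[i-1], v[j] );
    }
}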
Example no. 6
0
    //------------------------------------------------------------------------------
    /// Generates a map for the given Stage.
    ///
    /// @param[in]      aNumber Stage number
    /// @param[in,out]  aStage  Stage data; rewritten when the function is called.
    /// @param[in,out]  aRandom Random number generator
    void LevelDesigner::Setup(int aNumber, Stage& aStage, Random& aRandom)
    {
        int width = Parameter::FieldWidthMin;
        width += aRandom.randTerm((Parameter::FieldWidthMax - Parameter::FieldWidthMin) / 4 + 1) * 4;
        int height = Parameter::FieldHeightMin;
        height += aRandom.randTerm((Parameter::FieldHeightMax - Parameter::FieldHeightMin) / 4 + 1) * 4;

        // From the stage number, determine the wall density, the fraction of items with a specified delivery period, and the item count.
        int wallDensityIndex = aNumber % Parameter::WallDensityMax;
        int periodSpecifiedIndex = (aNumber / Parameter::WallDensityMax) % Parameter::PeriodSpecifiedMax;
        int itemCountIndex = (aNumber / (Parameter::WallDensityMax * Parameter::PeriodSpecifiedMax)) % Parameter::ItemCountMax;

        // Keep wallDensity from being 0, since that would produce a maze with almost no walls.
        int wallDensity = (wallDensityIndex + 1) * (100 / Parameter::WallDensityMax);

        // Number of items with a specified delivery period. The fraction is applied directly; remainders are truncated.
        int itemCount = itemCountIndex + 1;
        int periodSpecifiedCount = itemCount * periodSpecifiedIndex / (Parameter::PeriodSpecifiedMax - 1);

        // Generate the field
        aStage.field().setup(width, height, wallDensity, aRandom);

        // Generate the items
        aStage.items().reset();
        int weightHistogram[Parameter::ItemWeightMax + 1] = { 0 };
        int itemWeights[Parameter::ItemCountMax];
        int periodSpecs[Parameter::ItemCountMax];
        for (int i = 0; i < itemCount; ++i) {
            int w;
            do {
                w = aRandom.randMinMax(Parameter::ItemWeightMin, Parameter::ItemWeightMax);
            } while (weightHistogram[w] >= Parameter::WeightHistogramMax);
            itemWeights[i] = w;
            weightHistogram[w]++;
            periodSpecs[i] = -1;
        }

        // Assign the delivery periods.
        int periodItemWeightSum[Parameter::PeriodCount] = { 0 };
        for (int i = 0; i < periodSpecifiedCount; ++i) {
            for (;;) {
                int ix = aRandom.randTerm(itemCount);
                if (periodSpecs[ix] == -1) {
                    int p = aRandom.randTerm(Parameter::PeriodCount);
                    if (periodItemWeightSum[p] + itemWeights[ix] <= Parameter::TruckWeightCapacity) {
                        periodSpecs[ix] = p;
                        periodItemWeightSum[p] += itemWeights[ix];
                        break;
                    }
                }
            }
        }

        // Place the items
        for (int i = 0; i < itemCount; i++) {
            Pos pos;
            for (;;) {
                int x = aRandom.randTerm(width);
                int y = aRandom.randTerm(height);
                pos = Pos(x, y);
                if (aStage.field().isWall(pos)) {
                    continue;
                }
                if (aStage.field().officePos() == pos) {
                    continue;
                }
                bool overlaps = false;
                for (int j = 0; j < i; ++j) {
                    if (aStage.items()[j].destination() == pos) {
                        overlaps = true;
                        break;
                    }
                }
                if (overlaps) {
                    continue;
                }
                break;
            }
            aStage.items().addItem(pos, periodSpecs[i], itemWeights[i]);
        }
    }
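The three indices above form a mixed-radix decomposition of the stage number; purely for illustration, with hypothetical parameter values WallDensityMax = 4, PeriodSpecifiedMax = 5, ItemCountMax = 16 and aNumber = 37:

    // wallDensityIndex     = 37 % 4              = 1
    // periodSpecifiedIndex = (37 / 4) % 5        = 9 % 5  = 4
    // itemCountIndex       = (37 / (4 * 5)) % 16 = 1 % 16 = 1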
Example no. 7
0
void Pbc::test(){
  Random r;
  r.setSeed(-20);
  for(int i=0;i<1000;i++){
// random matrix with some zero element
    Tensor box;
    for(int j=0;j<3;j++) for(int k=0;k<3;k++) if(r.U01()>0.2){
      box[j][k]=2.0*r.U01()-1.0;
    }
    int boxtype=i%10;
    switch(boxtype){
    case 0:
// cubic
      for(int j=0;j<3;j++) for(int k=0;k<3;k++) if(j!=k) box[j][k]=0.0;
      for(int j=1;j<3;j++) box[j][j]=box[0][0];
      break;
    case 1:
// orthorhombic
      for(int j=0;j<3;j++) for(int k=0;k<3;k++) if(j!=k) box[j][k]=0.0;
      break;
    case 2:
// hexagonal
      {
      int perm=r.U01()*100;
      Vector a;
      a(0)=r.U01()*2-2; a(1)=0.0;a(2)=0.0;
      double d=r.U01()*2-2;
      Vector b(0.0,d,0.0);
      Vector c(0.0,0.5*d,sqrt(3.0)*d*0.5);
      box.setRow((perm+0)%3,a);
      box.setRow((perm+1)%3,b);
      box.setRow((perm+2)%3,c);
      }
      break;
    case 3:
// bcc
      {
      int perm=r.U01()*100;
      double d=r.U01()*2-2;
      Vector a(d,d,d);
      Vector b(d,-d,d);
      Vector c(d,d,-d);
      box.setRow((perm+0)%3,a);
      box.setRow((perm+1)%3,b);
      box.setRow((perm+2)%3,c);
      }
      break;
    case 4:
// fcc
      {
      int perm=r.U01()*100;
      double d=r.U01()*2-2;
      Vector a(d,d,0);
      Vector b(d,0,d);
      Vector c(0,d,d);
      box.setRow((perm+0)%3,a);
      box.setRow((perm+1)%3,b);
      box.setRow((perm+2)%3,c);
      }
      break;
    default:
// triclinic
      break;
    }

    Pbc pbc;
    pbc.setBox(box);
    std::cerr<<"( "<<boxtype<<" )\n";
    std::cerr<<"Box:";
    for(int j=0;j<3;j++) for(int k=0;k<3;k++) std::cerr<<" "<<box[j][k];
    std::cerr<<"\n";
    std::cerr<<"Determinant: "<<determinant(box)<<"\n";
    std::cerr<<"Shifts:";
    for(int j=0;j<2;j++) for(int k=0;k<2;k++) for(int l=0;l<2;l++) std::cerr<<" "<<pbc.shifts[j][k][l].size();
    std::cerr<<"\n";
    int nshifts=0;
    int ntot=10000;
    for(int j=0;j<ntot;j++){
      Vector v(r.U01()-0.5,r.U01()-0.5,r.U01()-0.5);
      v*=5;
      for(int j=0;j<3;j++) if(r.U01()>0.2) v(j)=0.0;
      Vector full(v);
      Vector fast=pbc.distance(Vector(0,0,0),v,&nshifts);
      full=fast;

      pbc.fullSearch(full);
  
     if(modulo2(fast-full)>1e-10) {
        std::cerr<<"orig "<<v[0]<<" "<<v[1]<<" "<<v[2]<<"\n";
        std::cerr<<"fast "<<fast[0]<<" "<<fast[1]<<" "<<fast[2]<<"\n";
        std::cerr<<"full "<<full[0]<<" "<<full[1]<<" "<<full[2]<<"\n";
        std::cerr<<"diff "<<modulo2(fast)-modulo2(full)<<std::endl;
        if(std::fabs(modulo2(fast)-modulo2(full))>1e-15) plumed_error();
     }
    }
    std::cerr<<"Average number of shifts: "<<double(nshifts)/double(ntot)<<"\n";
  }
}
Example no. 8
0
void RoomGenerator::generateBrownianSolidity(TileMap *tileMap, Random &random)
{
	tileMap->clear(0, TileMap::FLAG_SOLID);

	int w = tileMap->getWidth();
	int h = tileMap->getHeight();

	int x = w / 2;
	int y = h / 2;
	int cleared = 0;

	while(cleared / (double)(w * h) < 0.35)
	{
		// Clear 3x3 tiles around x, y
		for (int yo = -1; yo <= 1; yo++)
		{
			for (int xo = -1; xo <= 1; xo++)
			{
				int rx = x + xo;
				int ry = y + yo;

				if (tileMap->isSolid(rx, ry))
				{
					cleared++;
					tileMap->setFlags(rx, ry, 0);
				}
			}
		}

		x += random.getInt(-2, 2);
		y += random.getInt(-2, 2);

		if (x <= 2 || y <= 2 || x >= w - 2 || y >= h - 3)
		{
			x = w / 2;
			y = h / 2;

			for (int i = 0; i < 30; i++)
			{
				int nx = random.getInt(3, w - 3);
				int ny = random.getInt(3, h - 4);

				if (tileMap->getFlags(nx, ny) == 0)
				{
					x = nx;
					y = ny;
					break;
				}
			}
		}
	}

	// Sprinkle some pillars
	int numPillars = random.getInt(cleared / 70, cleared / 20); // (min, max)

	for (int i = 0; i < numPillars; i++)
	{
		bool good = true;
		x = random.getInt(w);
		y = random.getInt(h);

		for (int yo = -1; yo <= 1; yo++)
		{
			for (int xo = -1; xo <= 1; xo++)
			{
				if (tileMap->isSolid(x + xo, y + yo))
				{
					good = false;
				}
			}
		}

		if (good)
			tileMap->setFlags(x, y, TileMap::FLAG_SOLID);
	}
}
Example no. 9
0
void
StringSet::scramble_explicit(Random& rnd)
{
  rnd.scramble(data_);
}
Example no. 10
0
Vector3 Vector3::random(Random& r) {
	Vector3 result;
	r.sphere(result.x, result.y, result.z);
	return result;
}
Example no. 11
0
void RoomGenerator::generateCastleMood(TileMap *tileMap, Random &random)
{
	tileMap->clear(0, TileMap::FLAG_SOLID);
	int w = tileMap->getWidth();
	int h = tileMap->getHeight();

	//room size limits
	int minWidth = 4;
	int minHeight = 4;
	int maxWidth = 15;
	int maxHeight = 15;

	int minNbrOfRooms = 15;
	int maxNbrOfRooms = 25;

	int pathWidth = 2;

	int lastX1 = -1;
	int lastY1 = -1;
	int lastX2 = -1;
	int lastY2 = -1;

	for(int i = 0; i < random.getInt(minNbrOfRooms, maxNbrOfRooms); i++)
	{
		int x1 = random.getInt(2, w - maxWidth - 1);
		int y1 = random.getInt(2, h - maxHeight - 1);
		int x2 = x1+random.getInt(minWidth, maxWidth);
		int y2 = y1+random.getInt(minHeight, maxHeight);

		bool didTouchOtherRoom = false;
		for(int x = x1; x < x2; x++)
		{
			for(int y = y1; y < y2; y++)
			{
				if(tileMap->getFlags(x, y) == 0)
					didTouchOtherRoom = true;
				tileMap->setFlags(x, y, 0);
			}
		}

		if(lastX1 != -1) //is there a previous room?
		{
			//carve a path from the old room to the new one...
			if(!didTouchOtherRoom) //if it does not already touch another room
			{
				int startX = lastX1 + (lastX2-lastX1)/2;
				int startY= lastY1 + (lastY2-lastY1)/2;
				int endX = x1 + (x2-x1)/2;
				int endY = y1 + (y2-y1)/2;
				int speedX = startX < endX ? 1 : -1;
				int speedY = startY < endY ? 1 : -1;
				
				int extraWidth = pathWidth%2!=0 ? 1 : 0;

				for(int x = startX; x != endX; x+=speedX)
				{
					for(int y = startY-pathWidth/2; y < startY+pathWidth/2+extraWidth; y++)
						tileMap->setFlags(x, y, 0);
				}

				for(int y = startY; y != endY; y+=speedY)
				{
					for(int x = endX-pathWidth/2; x < endX+pathWidth/2 + extraWidth; x++)
						tileMap->setFlags(x, y, 0);
				}
			}
		}
		lastX1 = x1;
		lastY1 = y1;
		lastX2 = x2;
		lastY2 = y2;
	}
}
Example no. 12
0
void naive_random_crack(){
  value_type x = arr[rr.nextInt(N)];
  int L,R; find_piece(ci, N, x,L,R);
  n_touched += R-L;
  add_crack(ci, N, x, partition(arr, x,L,R));
}
Example no. 13
0
double OmegaGibbs(double df,const vector<vector<vector<double> > > &D,
		  const vector<int> &oldClique,
		  const vector<vector<int> > &oldComponents,
		  int Q,int G,const double *Delta,
		  const double *r,const double *sigma2,
		  const double *tau2R,const double *b,
		  vector<vector<vector<double> > > &Omega,
		  Random &ran,int draw) {
  double dfNew = df + ((double) Q);
  
  // construct R matrix

  vector<vector<double> > R;
  R.resize(Q);
  int p,q;
  for (p = 0; p < Q; p++) {
    R[p].resize(Q);
  }
  for (p = 0; p < Q; p++) {
    R[p][p] = tau2R[p];
    for (q = p + 1; q < Q; q++) {
      R[p][q] = sqrt(tau2R[p] * tau2R[q]) * r[qq2index(p,q,Q)];
      R[q][p] = R[p][q];
    }
  }
  vector<vector<double> > RInverse;

  inverse(R,RInverse);

  // compute updated D parameter

  vector<vector<vector<double> > > DNew(D);
  vector<vector<vector<double> > > DeltaStar;
  DeltaStar.resize(DNew.size());

  // update first component of D
  
  int cliqueSize = DNew[0].size();
  DeltaStar[0].resize(cliqueSize);
  int g;
  for (g = 0; g < cliqueSize; g++) {
    DeltaStar[0][g].resize(Q);
    for (q = 0; q < Q; q++) 
      DeltaStar[0][g][q] = Delta[qg2index(q,g,Q,G)] / exp(0.5 * b[q] * log(sigma2[qg2index(q,g,Q,G)]));
  }
  
  vector<vector<double> > temp;
  quadratic2(DeltaStar[0],RInverse,temp);
  int g1,g2;
  for (g1 = 0; g1 < DNew[0].size(); g1++)
    for (g2 = 0; g2 < DNew[0][g1].size(); g2++)
      DNew[0][g1][g2] += temp[g1][g2];
  
  int first = cliqueSize;
  
  // update remaining components of D

  int k;
  for (k = 1; k < DNew.size(); k++) {
    int cliqueSize = DNew[k].size();
    DeltaStar[k].resize(cliqueSize);

    int g;
    for (g = 0; g < oldComponents[k].size(); g++) {
      DeltaStar[k][g].resize(Q);
      for (q = 0; q < Q; q++)
	DeltaStar[k][g][q] = DeltaStar[oldClique[k]][oldComponents[k][g]][q];
    }
    
    for (g = 0; g < cliqueSize - oldComponents[k].size(); g++) {
      DeltaStar[k][g + oldComponents[k].size()].resize(Q);
      int gg = g + oldComponents[k].size();
      for (q = 0; q < Q; q++) {
	int kqg = qg2index(q,g + first,Q,G);
	DeltaStar[k][gg][q] = Delta[kqg] / exp(0.5 * b[q] * log(sigma2[kqg]));
      }
    }

    vector<vector<double> > temp;
    quadratic2(DeltaStar[k],RInverse,temp);
    int g1,g2;
    for (g1 = 0; g1 < DNew[k].size(); g1++)
      for (g2 = 0; g2 < DNew[k][g1].size(); g2++)
	DNew[k][g1][g2] += temp[g1][g2];
    
    first += cliqueSize - oldComponents[k].size();
  }

  if (draw == 1)
    Omega = ran.HyperInverseWishart(dfNew,DNew,oldClique,oldComponents);

  double pot = ran.PotentialHyperInverseWishart(dfNew,DNew,oldClique,oldComponents,Omega);

  return pot;
}
Example no. 14
0
void updateMRF2perfect(int q,int g,int Q,int G,vector<int> &valueLower,
		       vector<int> &valueUpper,const vector<double> &potOn,
		       const vector<double> &potOff,
		       const vector<vector<int> > &neighbour,
		       double alpha,double beta,double betag,
		       Random &ran) {
  int kqg = qg2index(q,g,Q,G);
  double potLower = potOff[kqg] - potOn[kqg];
  double potUpper = potOff[kqg] - potOn[kqg];

  potLower += - alpha;
  potUpper += - alpha;

  int k;
  for (k = 0; k < neighbour[g].size(); k++) {
    int gg = neighbour[g][k];
    int ng = neighbour[g].size();
    int ngg = neighbour[gg].size();
    //    double w = beta * exp(- kappa * log((double) (ng * ngg)));
    double w = beta * (1.0 / ((double) ng) + 1.0 / ((double) ngg));


    int kqgg = qg2index(q,gg,Q,G);
    if (valueLower[kqgg] == 0 && valueUpper[kqgg] == 0) {
      potLower += w;
      potUpper += w;
    }
    else if (valueLower[kqgg] == 1 && valueUpper[kqgg] == 1) {
      potLower += - w;
      potUpper += - w;
    }
    else {
      potLower += w;
      potUpper += - w;
    }
  }

  int qq;
  for (qq = 0; qq < Q; qq++) {
    if (qq != q) {
      int kqqg = qg2index(qq,g,Q,G);

      if (valueLower[kqqg] == 0 && valueUpper[kqqg] == 0) {
	potLower += betag / ((double) (Q - 1));
	potUpper += betag / ((double) (Q - 1));
      }
      else if (valueLower[kqqg] == 1 && valueUpper[kqqg] == 1) {
	potLower += - betag / ((double) (Q - 1));
	potUpper += - betag / ((double) (Q - 1));
      }
      else {
	potLower += betag / ((double) (Q - 1));
	potUpper -= betag / ((double) (Q - 1));
      }
    }
  }

  double probLower;
  double probUpper;
  if (potLower < 0.0)
    probLower = 1.0 / (1.0 + exp(potLower));
  else
    probLower = exp(- potLower) / (1.0 + exp(- potLower));

  if (potUpper < 0.0)
    probUpper = 1.0 / (1.0 + exp(potUpper));
  else
    probUpper = exp(- potUpper) / (1.0 + exp(- potUpper));

  kqg = qg2index(q,g,Q,G);
  double u = ran.Unif01();
  if (u < probLower)
    valueLower[kqg] = 1;
  else
    valueLower[kqg] = 0;

  if (u < probUpper)
    valueUpper[kqg] = 1;
  else
    valueUpper[kqg] = 0;

  return;
}
Example no. 15
0
//-----------------------------------------------------------------------------
bool TestNormalEqSolver(Random& rng)
{
    // This test generates random normal equations to be solved by the BPP
    // solver.  Diagonal dominance is enforced on the cross-product matrix to
    // ensure that it is nonsingular.  A random passive set is also generated,
    // and it is designed to always have at least a single '1' entry in any
    // column.  The normal equation system is solved twice, one time with
    // column grouping and one time without.  The solutions are compared and
    // the residual norms printed out. 
    
    BitMatrix passive_set;
    SparseMatrix<double> A;
    DenseMatrix<double> W, WtW, WtA, X, X2;
    SetMaxThreadCount(std::thread::hardware_concurrency());
    double max_norm = 0.0, min_occupancy = 1.0, max_occupancy = 0.0;

    std::vector<unsigned int> col_indices;

    cout << "Running BPP normal equation solver test..." << endl;

    for (unsigned int i=0; i<BppTest::NUM_RUNS; ++i)
    {
        unsigned int m = rng.RandomRangeInt(16,  768);
        unsigned int n = rng.RandomRangeInt(64, 1024);

        // confine k to [4, 256], but also no larger than min(m, n)
        unsigned int s = std::min(m, n);
        unsigned int k = rng.RandomRangeInt( 4,  std::min(s, 256u));

        // force n == 1 occasionally
        if (0 == (i % 5))
            n = 1;

        col_indices.resize(n);
        for (unsigned int q=0; q<n; ++q)
            col_indices[q] = q;

        // generate random W
        W.Resize(m, k);
        RandomMatrix(W.Buffer(), W.LDim(), W.Height(), W.Width(), rng);

        // generate random sparse A with occupancy (0.1-0.09, 0.1+0.09)
        double occupancy = rng.RandomDouble(0.1, 0.09);
        unsigned int nz_per_col = occupancy * m;
        if (0 == nz_per_col)
            nz_per_col = 1;
        min_occupancy = std::min(min_occupancy, occupancy);
        max_occupancy = std::max(max_occupancy, occupancy);
        RandomSparseMatrix(rng, A, nz_per_col, m, m, n, n);

        // compute WtW and WtA
        WtW.Resize(k, k);
        WtA.Resize(k, n);
        Gemm(TRANSPOSE, NORMAL, 1.0, W, W, 0.0, WtW);
        Gemm(TRANSPOSE, NORMAL, 1.0, W, A, 0.0, WtA);

        // make WtW diagonally dominant to guarantee nonsingular
        MakeDiagonallyDominant(WtW);

        // generate a random passive set with random numbers of identical cols
        passive_set.Resize(k, n);
        RandomPassiveSet(passive_set, rng, k, n);

        // X will contain the column-grouped solution, X2 will contain the
        // solution with each column solved independently
        X.Resize(k, n);
        X2.Resize(k, n);
        
        // group columns and solve the linear systems
        BppSolveNormalEq(col_indices, passive_set, WtW, WtA, X);

        // solve again, but this time with no column grouping
        BppSolveNormalEqNoGroup(col_indices, passive_set, WtW, WtA, X2);

        // compute difference matrix X = X - X2
        Axpy(-1.0, X2, X);

        // check norm
        double norm = Norm(X, FROBENIUS_NORM);
        max_norm = std::max(norm, max_norm);
        if (norm > 1.0e-6)
        {
            // error - the norm of the difference matrix should be tiny
            WriteDelimitedFile(WtW.LockedBuffer(), WtW.LDim(), 
                               WtW.Height(), WtW.Width(), "LHS_error.csv", 6);
            WriteDelimitedFile(WtA.LockedBuffer(), WtA.LDim(), 
                               WtA.Height(), WtA.Width(), "RHS_error.csv", 6);
            cout << "**** ERROR EXIT ****" << endl;
            break;
        }

        cout << "[" << setw(4) << i << "/" << setw(4) << BppTest::NUM_RUNS 
             <<"] m: " << setw(6) << m << ", n: " << setw(6) << n << ", k: " 
             << setw(3) << k << ", norm of residual: " << norm << endl;
    }

    cout << endl;
    cout << "\t**** Results for BPP Normal Eq. Solver Test ****" << endl;
    cout << endl;
    cout << "\t\t" << BppTest::NUM_RUNS << " runs " << endl;
    auto prec = cout.precision();
    cout.precision(4);
    cout << "\t\tMin sparse percentage: " << 100.0*min_occupancy << endl;
    cout << "\t\tMax sparse percentage: " << 100.0*max_occupancy << endl;
    cout.precision(prec);
    cout << "\t\tMax residual Frobenius norm: " << max_norm << endl;    
    cout << endl;
    cout << "\t*************************************************" << endl;
    cout << endl;

    return (max_norm < BppTest::MAX_ACCEPTABLE_FNORM);
}
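MakeDiagonallyDominant is not shown in this extract; conceptually it only has to make each diagonal entry outweigh the absolute sum of the off-diagonal entries in its row. A minimal sketch under the assumption of a flat column-major buffer (the real DenseMatrix interface may differ):

#include <cmath>

// Hypothetical illustration: enforce strict diagonal dominance on an
// n x n matrix stored column-major in a flat buffer.
static void MakeDiagonallyDominantSketch(double* buf, unsigned int n)
{
    for (unsigned int i = 0; i < n; ++i)
    {
        double off_diag_sum = 0.0;
        for (unsigned int j = 0; j < n; ++j)
            if (j != i)
                off_diag_sum += std::fabs(buf[i + j*n]); // element (i, j)

        buf[i + i*n] = off_diag_sum + 1.0; // diagonal strictly dominates row i
    }
}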
Example no. 16
0
bool SelfOrganizingMap::train_( MatrixFloat &data ){
    
    //Clear any previous models
    clear();
    
    const UINT M = data.getNumRows();
    const UINT N = data.getNumCols();
    numInputDimensions = N;
    numOutputDimensions = numClusters;
    Random rand;
    
    //Setup the neurons
    neurons.resize( numClusters );
    
    if( neurons.size() != numClusters ){
        errorLog << "train_( MatrixFloat &data ) - Failed to resize neurons Vector, there might not be enough memory!" << std::endl;
        return false;
    }
    
    for(UINT j=0; j<numClusters; j++){
        
        //Init the neuron
        neurons[j].init( N, 0.5 );
        
        //Set the weights as a random training example
        neurons[j].weights = data.getRowVector( rand.getRandomNumberInt(0, M) );
    }
    
    //Setup the network weights
    switch( networkTypology ){
        case RANDOM_NETWORK:
            networkWeights.resize(numClusters, numClusters);
            
            //Set the diagonal weights as 1 (as i==j)
            for(UINT i=0; i<numClusters; i++){
                networkWeights[i][i] = 1;
            }
            
            //Randomize the other weights
            UINT indexA = 0;
            UINT indexB = 0;
            Float weight = 0;
            for(UINT i=0; i<numClusters*numClusters; i++){
                indexA = rand.getRandomNumberInt(0, numClusters);
                indexB = rand.getRandomNumberInt(0, numClusters);
                
                //Only update when the two random indices differ (the diagonal entries stay at 1)
                if( indexA != indexB ){
                    //Pick a random weight between these two neurons
                    weight = rand.getRandomNumberUniform(0,1);
                    
                    //The weight between neurons a and b is mirrored
                    networkWeights[indexA][indexB] = weight;
                    networkWeights[indexB][indexA] = weight;
                }
            }
            break;
    }
    
    //Scale the data if needed
    ranges = data.getRanges();
    if( useScaling ){
        for(UINT i=0; i<M; i++){
            for(UINT j=0; j<numInputDimensions; j++){
                data[i][j] = scale(data[i][j],ranges[j].minValue,ranges[j].maxValue,0,1);
            }
        }
    }
    
    Float error = 0;
    Float lastError = 0;
    Float trainingSampleError = 0;
    Float delta = 0;
    Float minChange = 0;
    Float weightUpdate = 0;
    Float weightUpdateSum = 0;
    Float alpha = 1.0;
    Float neuronDiff = 0;
    UINT iter = 0;
    bool keepTraining = true;
    VectorFloat trainingSample;
    Vector< UINT > randomTrainingOrder(M);
    
    //In most cases, the training data is grouped into classes (100 samples for class 1, followed by 100 samples for class 2, etc.)
    //This can cause a problem for stochastic gradient descent algorithm. To avoid this issue, we randomly shuffle the order of the
    //training samples. This random order is then used at each epoch.
    for(UINT i=0; i<M; i++){
        randomTrainingOrder[i] = i;
    }
    std::random_shuffle(randomTrainingOrder.begin(), randomTrainingOrder.end());
    
    //Enter the main training loop
    while( keepTraining ){
        
        //Update alpha based on the current iteration
        alpha = Util::scale(iter,0,maxNumEpochs,alphaStart,alphaEnd);
        
        //Run one epoch of training using the online best-matching-unit algorithm
        error = 0;
        for(UINT i=0; i<M; i++){
            
            trainingSampleError = 0;
            
            //Get the i'th random training sample
            trainingSample = data.getRowVector( randomTrainingOrder[i] );
            
            //Find the best matching unit
            Float dist = 0;
            Float bestDist = grt_numeric_limits< Float >::max();
            UINT bestIndex = 0;
            for(UINT j=0; j<numClusters; j++){
                dist = neurons[j].getSquaredWeightDistance( trainingSample );
                if( dist < bestDist ){
                    bestDist = dist;
                    bestIndex = j;
                }
            }
            
            //Update the weights based on the distance to the winning neuron
            //Neurons closer to the winning neuron will have their weights update more
            for(UINT j=0; j<numClusters; j++){
                
                //Update the weights for the j'th neuron
                weightUpdateSum = 0;
                neuronDiff = 0;
                for(UINT n=0; n<N; n++){
                    neuronDiff = trainingSample[n] - neurons[j][n];
                    weightUpdate = networkWeights[bestIndex][j] * alpha * neuronDiff;
                    neurons[j][n] += weightUpdate;
                    weightUpdateSum += neuronDiff;
                }
                
                trainingSampleError += grt_sqr( weightUpdateSum );
            }
            
            error += grt_sqrt( trainingSampleError / numClusters );
        }
        
        //Compute the error
        delta = fabs( error-lastError );
        lastError = error;
        
        //Check to see if we should stop
        if( delta <= minChange ){
            converged = true;
            keepTraining = false;
        }
        
        if( grt_isinf( error ) ){
            errorLog << "train_(MatrixFloat &data) - Training failed! Error is NAN!" << std::endl;
            return false;
        }
        
        if( ++iter >= maxNumEpochs ){
            keepTraining = false;
        }
        
        trainingLog << "Epoch: " << iter << " Squared Error: " << error << " Delta: " << delta << " Alpha: " << alpha << std::endl;
    }
    
    numTrainingIterationsToConverge = iter;
    trained = true;
    
    return true;
}
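std::random_shuffle, used above, was deprecated in C++14 and removed in C++17; a self-contained replacement based on the <random> facilities would look like this (sketch, not part of the original source):

#include <algorithm>
#include <random>
#include <vector>

// Shuffle a vector of training indices with std::shuffle, replacing the
// deprecated std::random_shuffle call above.
static void shuffleTrainingOrder( std::vector<unsigned int> &order ){
    static std::mt19937 rng( std::random_device{}() );
    std::shuffle( order.begin(), order.end(), rng );
}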
Example no. 17
0
//-----------------------------------------------------------------------------
bool TestNormalEqSolverLeft(Random& rng)
{
    // This test generates random normal equations to be solved by the BPP
    // solvers.  Diagonal dominance is enforced on the cross-product matrix to
    // ensure that it is nonsingular.  A random passive set is also generated,
    // and it is designed to always have at least a single '1' entry in any
    // column.  The normal equation system is solved twice, one time using
    // the 'left-oriented' solver, and one time using the standard solver.
    // The solutions are compared and the residual norms printed out. 
    
    BitMatrix passive_set_1, passive_set_2;
    SparseMatrix<double> A;
    DenseMatrix<double> H, HHt, AHt, AHtt, X, Xt, temp, PSG, PSGt;
    SetMaxThreadCount(std::thread::hardware_concurrency());
    double max_norm = 0.0, min_occupancy = 1.0, max_occupancy = 0.0;

    std::vector<unsigned int> row_indices, col_indices;

    for (unsigned int i=0; i<BppTest::NUM_RUNS; ++i)
    {
        unsigned int m = rng.RandomRangeInt(16,  768);
        unsigned int n = rng.RandomRangeInt(64, 1024);

        // confine k to [4, 256], but also no larger than min(m, n)
        unsigned int s = std::min(m, n);
        unsigned int k = rng.RandomRangeInt( 4,  std::min(s, 256u));

        // force m == 1 occasionally (single row)
        if (0 == (i % 5))
            m = 1;

        row_indices.resize(m);
        for (unsigned int q=0; q<m; ++q)
            row_indices[q] = q;

        col_indices.resize(m);
        for (unsigned int q=0; q<m; ++q)
            col_indices[q] = q;

        // generate random H
        H.Resize(k, n);
        RandomMatrix(H.Buffer(), H.LDim(), H.Height(), H.Width(), rng);

        // generate random sparse A with occupancy (0.1-0.09, 0.1+0.09)
        double occupancy = rng.RandomDouble(0.1, 0.09);
        unsigned int nz_per_col = occupancy * m;
        if (0 == nz_per_col)
            nz_per_col = 1;
        min_occupancy = std::min(min_occupancy, occupancy);
        max_occupancy = std::max(max_occupancy, occupancy);
        RandomSparseMatrix(rng, A, nz_per_col, m, m, n, n);

        // compute HHt, AHt, and (AHt)'
        HHt.Resize(k, k);
        AHt.Resize(m, k);
        AHtt.Resize(k, m);
        Gemm(NORMAL, TRANSPOSE, 1.0, H, H, 0.0, HHt);
        Gemm(NORMAL, TRANSPOSE, 1.0, A, H, 0.0, AHt);
        Transpose(AHt, AHtt);

        // make HHt diagonally dominant to guarantee nonsingular
        MakeDiagonallyDominant(HHt);

        // X will contain the left-oriented solution; Xt will contain the
        // solution from the standard solver applied to the transposed system
        X.Resize(m, k);
        Xt.Resize(k, m);

        // load X and X'
        RandomMatrix(X.Buffer(), X.LDim(), X.Height(), X.Width(), rng);
        Transpose(X, Xt);

        // generate a passive set and the corresponding col and row indices
        PSG.Resize(m, k);
        RandomMatrix(PSG.Buffer(), PSG.LDim(), PSG.Height(), PSG.Width(), rng, 0.4, 0.45);
        passive_set_1.Resize(m, k);
        passive_set_1 = (PSG > 0.0);

        PSGt.Resize(k, m);
        Transpose(PSG, PSGt);
        passive_set_2.Resize(k, m);
        passive_set_2 = (PSGt > 0.0);

        // solve X * HHt = AHt   [mxk][kxk] = [mxk]
        //SolveNormalEqLeft(HHt, AHt, X);
        if (!BppSolveNormalEqLeftNoGroup(row_indices, passive_set_1, HHt, AHt, X))
        {
            cerr << "\tRandom matrix was rank-deficient; skipping..." << endl;
            continue;
        }

        // solve HHt * X' = (AHt)' with the standard solver  [kxk][kxm] = [kxm]
        //SolveNormalEq(HHt, AHtt, Xt);
        if (!BppSolveNormalEqNoGroup(col_indices, passive_set_2, HHt, AHtt, Xt))
        {
            cerr << "\tRandom matrix was rank-deficient; skipping..." << endl;
            continue;
        }

        // check the residual norm
        temp.Resize(m, k);
        Transpose(Xt, temp);
        Axpy(-1.0, X, temp);
        double norm = Norm(temp, FROBENIUS_NORM);
        max_norm = std::max(norm, max_norm);

        cout << "[" << setw(4) << i << "/" << setw(4) << BppTest::NUM_RUNS 
             <<"] m: " << setw(6) << m << ", n: " << setw(6) << n << ", k: " 
             << setw(3) << k << ", norm of residual: " << norm << endl;
    }
    
    cout << endl;
    cout << "\t****** Results for TestNormalEqSolverLeft *******" << endl;
    cout << endl;
    cout << "\t\t" << BppTest::NUM_RUNS << " runs " << endl;
    auto prec = cout.precision();
    cout.precision(4);
    cout << "\t\tMin sparse percentage: " << 100.0*min_occupancy << endl;
    cout << "\t\tMax sparse percentage: " << 100.0*max_occupancy << endl;
    cout.precision(prec);
    cout << "\t\tMax residual Frobenius norm: " << max_norm << endl;    
    cout << endl;
    cout << "\t*************************************************" << endl;
    cout << endl;

    return (max_norm < BppTest::MAX_ACCEPTABLE_FNORM);
}
Example no. 18
0
static char RandCh(Random &Rand) {
  if (Rand.RandBool()) return Rand(256);
  // Use an array (not a pointer) so that sizeof yields the string length.
  const char Special[] = "!*'();:@&=+$,/?%#[]123ABCxyz-`~.";
  return Special[Rand(sizeof(Special) - 1)];
}
void DragAndDropContainer::startDragging (const var& sourceDescription,
                                          Component* sourceComponent,
                                          const Image& dragImage_,
                                          const bool allowDraggingToExternalWindows,
                                          const Point<int>* imageOffsetFromMouse)
{
    Image dragImage (dragImage_);

    if (dragImageComponent == nullptr)
    {
        MouseInputSource* draggingSource = Desktop::getInstance().getDraggingMouseSource (0);

        if (draggingSource == nullptr || ! draggingSource->isDragging())
        {
            jassertfalse;   // You must call startDragging() from within a mouseDown or mouseDrag callback!
            return;
        }

        const Point<int> lastMouseDown (Desktop::getLastMouseDownPosition());
        Point<int> imageOffset;

        if (dragImage.isNull())
        {
            dragImage = sourceComponent->createComponentSnapshot (sourceComponent->getLocalBounds())
                            .convertedToFormat (Image::ARGB);

            dragImage.multiplyAllAlphas (0.6f);

            const int lo = 150;
            const int hi = 400;

            Point<int> relPos (sourceComponent->getLocalPoint (nullptr, lastMouseDown));
            Point<int> clipped (dragImage.getBounds().getConstrainedPoint (relPos));
            Random random;

            for (int y = dragImage.getHeight(); --y >= 0;)
            {
                const double dy = (y - clipped.getY()) * (y - clipped.getY());

                for (int x = dragImage.getWidth(); --x >= 0;)
                {
                    const int dx = x - clipped.getX();
                    const int distance = roundToInt (std::sqrt (dx * dx + dy));

                    if (distance > lo)
                    {
                        const float alpha = (distance > hi) ? 0
                                                            : (hi - distance) / (float) (hi - lo)
                                                               + random.nextFloat() * 0.008f;

                        dragImage.multiplyAlphaAt (x, y, alpha);
                    }
                }
            }

            imageOffset = clipped;
        }
        else
        {
            if (imageOffsetFromMouse == nullptr)
                imageOffset = dragImage.getBounds().getCentre();
            else
                imageOffset = dragImage.getBounds().getConstrainedPoint (-*imageOffsetFromMouse);
        }

        dragImageComponent = new DragImageComponent (dragImage, sourceDescription, sourceComponent,
                                                     draggingSource->getComponentUnderMouse(), *this, imageOffset);

        currentDragDesc = sourceDescription;

        if (allowDraggingToExternalWindows)
        {
            if (! Desktop::canUseSemiTransparentWindows())
                dragImageComponent->setOpaque (true);

            dragImageComponent->addToDesktop (ComponentPeer::windowIgnoresMouseClicks
                                               | ComponentPeer::windowIsTemporary
                                               | ComponentPeer::windowIgnoresKeyPresses);
        }
        else
        {
            Component* const thisComp = dynamic_cast <Component*> (this);

            if (thisComp == nullptr)
            {
                jassertfalse;   // Your DragAndDropContainer needs to be a Component!
                return;
            }

            thisComp->addChildComponent (dragImageComponent);
        }

        static_cast <DragImageComponent*> (dragImageComponent.get())->updateLocation (false, lastMouseDown);
        dragImageComponent->setVisible (true);

       #if JUCE_WINDOWS
        // Under heavy load, the layered window's paint callback can often be lost by the OS,
        // so forcing a repaint at least once makes sure that the window becomes visible..
        ComponentPeer* const peer = dragImageComponent->getPeer();
        if (peer != nullptr)
            peer->performAnyPendingRepaintsNow();
       #endif
    }
}
Example no. 20
0
 unsigned getKey() {
     return random.select(0,MaxKey);
 }
Example no. 21
0
bool GaussianMixtureModels::train(const MatrixDouble &data,const UINT K){

	modelTrained = false;
    failed = false;

	//Clear any previous training results
	det.clear();
	invSigma.clear();

    if( data.getNumRows() == 0 ){
        errorLog << "train(const MatrixDouble &trainingData,const unsigned int K) - Training Failed! Training data is empty!" << endl;
        return false;
    }

	//Resize the variables
	M = data.getNumRows();
	N = data.getNumCols();
	this->K = K;
	
	//Resize mu and resp
	mu.resize(K,N);
	resp.resize(M,K);
	
	//Resize sigma
	sigma.resize(K);
	for(UINT k=0; k<K; k++){
		sigma[k].resize(N,N);
	}
	
	//Resize frac and lndets
	frac.resize(K);
	lndets.resize(K);

	//Pick K random starting points for the initial guesses of Mu
	Random random;
	vector< UINT > randomIndexs(M);
	for(UINT i=0; i<M; i++) randomIndexs[i] = i;
	for(UINT i=0; i<M; i++){
		SWAP(randomIndexs[ random.getRandomNumberInt(0,M) ],randomIndexs[ random.getRandomNumberInt(0,M) ]);
	}
	for(UINT k=0; k<K; k++){
		for(UINT n=0; n<N; n++){
			mu[k][n] = data[ randomIndexs[k] ][n];
		}
	}

	//Setup sigma and the uniform prior on P(k)
	for(UINT k=0; k<K; k++){
		frac[k] = 1.0/double(K);
		for(UINT i=0; i<N; i++){
			for(UINT j=0; j<N; j++) sigma[k][i][j] = 0;
			sigma[k][i][i] = 1.0e-10;   //Set the diagonal to a small number
		}
	}

	loglike = 0;
	UINT iterCounter = 0;
	bool keepGoing = true;
	double change = 99.9e99;
    
	while( keepGoing ){
		change = estep( data );
		mstep( data );

		if( fabs( change ) < minChange ) keepGoing = false;
		if( ++iterCounter >= maxIter ) keepGoing = false;
		if( failed ) keepGoing = false;
	}

	if( failed ){
		errorLog << "train(UnlabelledClassificationData &trainingData,unsigned int K) - Training failed!" << endl;
		return modelTrained;
	}

	//Compute the inverse of sigma and the determinants for prediction
	if( !computeInvAndDet() ){
        det.clear();
        invSigma.clear();
        errorLog << "train(UnlabelledClassificationData &trainingData,unsigned int K) - Failed to compute inverse and determinat!" << endl;
        return false;
    }

    //Flag that the model was trained
	modelTrained = true;

	return true;
}
TimeSeriesClassificationData TimeSeriesClassificationData::partition(const UINT trainingSizePercentage,const bool useStratifiedSampling){

    //Partitions the dataset into a training dataset (which is kept by this instance of the TimeSeriesClassificationData) and
    //a testing/validation dataset (which is return as a new instance of the TimeSeriesClassificationData).  The trainingSizePercentage
    //therefore sets the size of the data which remains in this instance and the remaining percentage of data is then added to
    //the testing/validation dataset

    //The dataset has changed so flag that any previous cross validation setup will now not work
    crossValidationSetup = false;
    crossValidationIndexs.clear();

    TimeSeriesClassificationData trainingSet(numDimensions);
    TimeSeriesClassificationData testSet(numDimensions);
    trainingSet.setAllowNullGestureClass(allowNullGestureClass);
    testSet.setAllowNullGestureClass(allowNullGestureClass);
    vector< UINT > indexs( totalNumSamples );

    //Create the random partition indices
    Random random;
    UINT randomIndex = 0;

    if( useStratifiedSampling ){
        //Break the data into separate classes
        vector< vector< UINT > > classData( getNumClasses() );

        //Add the indexs to their respective classes
        for(UINT i=0; i<totalNumSamples; i++){
            classData[ getClassLabelIndexValue( data[i].getClassLabel() ) ].push_back( i );
        }

        //Randomize the order of the indexs in each of the class index buffers
        for(UINT k=0; k<getNumClasses(); k++){
            UINT numSamples = (UINT)classData[k].size();
            for(UINT x=0; x<numSamples; x++){
                //Pick a random index
                randomIndex = random.getRandomNumberInt(0,numSamples);

                //Swap the indexs
                SWAP( classData[k][ x ] ,classData[k][ randomIndex ] );
            }
        }

        //Loop over each class and add the data to the trainingSet and testSet
        for(UINT k=0; k<getNumClasses(); k++){
            UINT numTrainingExamples = (UINT) floor( double(classData[k].size()) / 100.0 * double(trainingSizePercentage) );

            //Add the data to the training and test sets
            for(UINT i=0; i<numTrainingExamples; i++){
                trainingSet.addSample( data[ classData[k][i] ].getClassLabel(), data[ classData[k][i] ].getData() );
            }
            for(UINT i=numTrainingExamples; i<classData[k].size(); i++){
                testSet.addSample( data[ classData[k][i] ].getClassLabel(), data[ classData[k][i] ].getData() );
            }
        }

        //Overwrite the training data in this instance with the training data of the trainingSet
        data = trainingSet.getClassificationData();
        totalNumSamples = trainingSet.getNumSamples();
    }else{

        const UINT numTrainingExamples = (UINT) floor( double(totalNumSamples) / 100.0 * double(trainingSizePercentage) );
        //Create the random partition indices
        Random random;
        for(UINT i=0; i<totalNumSamples; i++) indexs[i] = i;
        for(UINT x=0; x<totalNumSamples; x++){
            //Pick a random index
            randomIndex = random.getRandomNumberInt(0,totalNumSamples);

            //Swap the indexs
            SWAP( indexs[ x ] , indexs[ randomIndex ] );
        }

        //Add the data to the training and test sets
        for(UINT i=0; i<numTrainingExamples; i++){
            trainingSet.addSample( data[ indexs[i] ].getClassLabel(), data[ indexs[i] ].getData() );
        }
        for(UINT i=numTrainingExamples; i<totalNumSamples; i++){
            testSet.addSample( data[ indexs[i] ].getClassLabel(), data[ indexs[i] ].getData() );
        }

        //Overwrite the training data in this instance with the training data of the trainingSet
        data = trainingSet.getClassificationData();
        totalNumSamples = trainingSet.getNumSamples();
    }

    return testSet;
}
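A typical call site keeps a percentage of the samples for training and receives the remainder as a new test set; a hedged usage sketch (variable names and the dimension count are illustrative):

TimeSeriesClassificationData dataset( 3 );  //3 input dimensions (illustrative)
// ... record or load labelled time series into 'dataset' ...

//Keep 80% of the samples in 'dataset' for training; the returned set holds
//the remaining 20%. Stratified sampling keeps the per-class ratios intact.
TimeSeriesClassificationData testSet = dataset.partition( 80, true );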
Example no. 24
0
/**
  * Moves to the next word
  */
void Phonemic::nextWord()
{
	//resetOrder(); 	/* reset cube solution order */
    // Advance the word (and level)
	word++;
	if(word >= 8) {
		level++;
		word = 0;
	}
    if(level > LAST_LEVEL) {
        level = 0;
        word = 0;
    }
	
	//word_num = (level * 8) + word;
	
    // Test for game over
    // TODO: check for end-of-game
    length = wordFamilies[level].length[word];
	/* deprecated, added word length to wordfamily struct */
    /*while(length < MAX_WORD_SIZE && wordFamilies[level].words[word][length] != -1)
        length++;
     */   
	 
	while (length > 3) {
		word++;
		if(word >= 8) {
			level++;
			word = 0;
		}
		if(level > LAST_LEVEL) {
			level = 0;
			word = 0;
		}
		length = wordFamilies[level].length[word];
	}
	 
	int i = 0;
	int r = 0;
	int j[3] = {-1,-1,-1};
	for (int k = 0; k < 3; k++) {
		do {
			r = gRandom.randint(0, 2);
		} while (r == j[0] || r == j[1] || r == j[2]);
		j[k] = r;
	}
	for(CubeID cube: CubeSet::connected())
	{
        // Reverse the order of the cubes every other word
		// change this to random rather than reverse, and should have same functionality as proto-type
		// TO DO
        //int j = i;
        //if(word % 2 == 0) j = length - i - 1;
		if(i < length) {
				//	OLD
				cubes[cube].symbol = wordFamilies[level].phonemes[word][j[i]];
				//cubes[cube].images[0] = graphemes[wordFamilies[level].graphemes[word][j]].grapheme[0]; 
				//cubes[cube].images[1] = graphemes[wordFamilies[level].graphemes[word][j]].grapheme[1]; 
				cubes[cube].vid.bg0.image(vec(0,0), Background/**cubes[cube].images[0]*/);
				cubes[cube].sound = phonemes[wordFamilies[level].phonemes[word][j[i]]].phoneme;
			/* NEW */
			//order[temp] = j;			/* track solution sequence */
			//cubes[cube].symbol = j;		/* used to compare for solution sequence */
			/* TO DO */
			cubes[cube].vid.bg1.setMask(BG1Mask::filled(vec(0,4), vec(16,8))); /* temp - can be taken out? */
			cubes[cube].vid.bg1.text(vec(0,4), Font2, "        ");
			cubes[cube].vid.bg1.text(vec(4,4), Font2, wordFamilies[level].graphemes[word][j[i]]);	/* get draw to work properly?? */
			//cubes[cube].sound = phonemes[wordFamilies[level].phoneme[word][j]].phoneme;		/* attach sound??? */
			//System::paint();	/* is this necessary? */
		} else {
			/* kept this the same, shouldn't require any changes */
			cubes[cube].symbol = -1;
            //cubes[cube].images[0] = &Sleep;
            //cubes[cube].images[1] = &Smile;
            cubes[cube].vid.bg0.image(vec(0,0), *cubes[cube].images[0]);
            cubes[cube].sound = &SfxBuzzer;
		}	
		i++;
		
		// Allocate 16x2 tiles on BG1 for text at the bottom of the screen
        // cubes[cube].vid.bg1.setMask(BG1Mask::filled(vec(0,14), vec(16,2)));
		// text.set(0, -20);
            // textTarget = text;
			// cubes[cube].vid.bg1.text(vec(0,14), Font, " Hello traveler ");
		// System::paint();
		
	}
	state = PLAY;
	
}
Example no. 25
0
    Photon Renderer::emitAndScatter(Random &random) {
        float currentRefractionIndex = 1.0;
        Scene::Sample sample;
        do {
            sample = scene()->uniformOnSurface(random);
        } while (luminance(sample.emission) * sample.emissionPower == 0.0f);
        float hitDistance; Scene::Sample hit;
        Ray ray;
        ray.origin = sample.position;
        do {
            ray.direction = random.cosineDirection(sample.normal);
        } while (!scene()->hit(ray, hitDistance, hit));
        Photon photon;
        photon.position = hit.position;
        photon.direction = ray.direction;
        photon.power = sample.emission * sample.emissionPower;
        while (true) {
            float action = random.uniformInRange01();

            float specularLuminance = luminance(hit.specular);
            if (action < specularLuminance) {
                Ray reflectedRay;
                reflectedRay.origin = hit.position;
                reflectedRay.direction = reflect(ray.direction, hit.normal);
                reflectedRay.origin += reflectedRay.direction * 0.001f;
                ray = reflectedRay;
                if (!scene()->hit(ray, hitDistance, hit)) {
                    photon.power = Color::BLACK;
                    break;
                }
                photon.position = hit.position;
                photon.direction = ray.direction;
                continue;
            } else {
                action -= specularLuminance;
            }

            float transmissionLuminance = luminance(hit.transmission);
            if (action < transmissionLuminance) {
                Ray transmittedRay;
                transmittedRay.origin = hit.position;
                if (dot(hit.normal, ray.direction) <= 0.0f) {
                    transmittedRay.direction = transmit(ray.direction, +hit.normal, 1.0f / hit.refractionIndex);
                } else {
                    transmittedRay.direction = transmit(ray.direction, -hit.normal, hit.refractionIndex / 1.0f);
                }
                transmittedRay.origin += transmittedRay.direction * 0.001f;
                ray = transmittedRay;
                if (nan(sqrLength(ray.direction)) || !scene()->hit(ray, hitDistance, hit)) {
                    photon.power = Color::BLACK;
                    break;
                }
                photon.position = hit.position;
                photon.direction = ray.direction;
                continue;
            } else {
                action -= transmissionLuminance;
            }

            // absorption
            break;
        }
        return photon;
    }
Example no. 26
0
void Domain::write(char* prefix, int format, double mu)
{
   ofstream xdr, txt, buchholz;
   stringstream strstrm, txtstrstrm, buchholzstrstrm;
   if(format == FORMAT_BRANCH)
   {
      strstrm << prefix << ".xdr";
   }
   if(format == FORMAT_BUCHHOLZ)
   {
      strstrm << prefix << ".inp";
   }
   xdr.open(strstrm.str().c_str(), ios::trunc);
   if(format == FORMAT_BRANCH)
   {
      txtstrstrm << prefix << "_1R.txt";
   }
   if(format == FORMAT_BUCHHOLZ)
   {
      txtstrstrm << prefix << "_1R.cfg";
   }
   txt.open(txtstrstrm.str().c_str(), ios::trunc);
   if(format == FORMAT_BUCHHOLZ)
   {
      buchholzstrstrm << prefix << "_1R.xml";
      buchholz.open(buchholzstrstrm.str().c_str(), ios::trunc);

      /*
       * Entire contents of the Buchholz file
       */
      buchholz << "<?xml version = \'1.0\' encoding = \'UTF-8\'?>\n<mardyn version=\""
               << TIME << "\">\n   <simulation type=\"MD\">\n      <input type=\"oldstyle\">"
               << prefix << "_1R.cfg</input>\n   </simulation>\n</mardyn>";
      buchholz.close();
   }

   Random* r = new Random();
   r->init(
      (int)(3162.3*box[0]) + (int)(31623.0*box[1]) - (int)(316.23*box[2])
   );
   double REFTIME = SIG_REF * sqrt(REFMASS / EPS_REF);
   double VEL_REF = SIG_REF / REFTIME;
   cout << "Velocity unit 1 = " << VEL_REF << " * 1620.34 m/s = "
        << 1620.34 * VEL_REF << " m/s.\n";
   double REFCARG = sqrt(EPS_REF * SIG_REF);
   cout << "Charge unit 1 = " << REFCARG << " e.\n";
   double DIP_REF = SIG_REF*REFCARG;
   double QDR_REF = SIG_REF*DIP_REF;
   double REFOMGA = 1.0 / REFTIME;

   unsigned fl_units[3];
   double fl_unit[3];
   double N_boxes = N / 3.0;
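   // Choose a lattice of unit cells whose edge counts are proportional to the
   // box edges (so the cells are roughly cubic); each cell provides three
   // sites, hence N_boxes = N/3 cells are targeted in total.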
   fl_units[1] = round(
                    pow(
                       (N_boxes * box[1]*box[1])
                                / (this->box[0] * this->box[2]), 1.0/3.0
                    )
                 );
   if(fl_units[1] == 0) fl_units[1] = 1;
   double bxbz_id = N_boxes / fl_units[1];
   fl_units[0] = round(sqrt(this->box[0] * bxbz_id / this->box[2]));
   if(fl_units[0] == 0) fl_units[0] = 1;
   fl_units[2] = ceil(bxbz_id / fl_units[0]);
   for(int d=0; d < 3; d++) fl_unit[d] = box[d] / (double)fl_units[d];
   cout << "Unit cell dimensions: " << fl_unit[0] << " x " << fl_unit[1] << " x " << fl_unit[2] << ".\n";
   bool fill[fl_units[0]][fl_units[1]][fl_units[2]][3];
   unsigned slots = 3 * fl_units[0] * fl_units[1] * fl_units[2];
   /*
   double pfill = (double)N / slots;
   unsigned N1 = 0;
   for(unsigned i=0; i < fl_units[0]; i++)
      for(unsigned j=0; j < fl_units[1]; j++)
         for(unsigned k=0; k < fl_units[2]; k++)
            for(int d=0; d < 3; d++)
            {
               bool tfill = (pfill >= r->rnd());
               fill[i][j][k][d] = tfill;
               if(tfill) N1++;
            }
   */
   unsigned N1 = slots; 
   for(unsigned i=0; i < fl_units[0]; i++)
      for(unsigned j=0; j < fl_units[1]; j++)
         for(unsigned k=0; k < fl_units[2]; k++)
            for(int d=0; d < 3; d++) fill[i][j][k][d] = true;

   bool tswap;
   double pswap;
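   // Iteratively nudge the number of occupied slots N1 toward the target N:
   // in each pass every slot is reset to tswap (occupied if N1 < N, empty
   // otherwise) with probability pswap, chosen so that the expected occupation
   // after the pass equals N.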
   for(int m=0; m < PRECISION; m++)
   {
      tswap = (N1 < N);
      pswap = ((double)N - (double)N1) / ((tswap? slots: 0) - (double)N1);
      // cout << "(N = " << N << ", N1 = " << N1 << ", tswap = " << tswap << ", pswap = " << pswap << ")\n";
      for(unsigned i=0; i < fl_units[0]; i++)
         for(unsigned j=0; j < fl_units[1]; j++)
            for(unsigned k=0; k < fl_units[2]; k++)
               for(int d=0; d < 3; d++)
                  if(pswap >= r->rnd())
                  {
                     if(fill[i][j][k][d]) N1--;
                     fill[i][j][k][d] = tswap;
                     if(tswap) N1++;
                  }
   }
   cout << "Filling " << N1 << " of 3*"
        << fl_units[0] << "*" << fl_units[1] << "*" << fl_units[2]
        << " = " << 3*fl_units[0]*fl_units[1]*fl_units[2]
        << " slots (ideally " << N << ").\n";

   double LJ_CUTOFF;
   double EL_CUTOFF;
   double FLUIDMASS, EPS_FLUID, SIG_FLUID, FLUIDLONG, QDR_FLUID;
   if(fluid == FLUID_AR)
   {
      FLUIDMASS = ARMASS;
      EPS_FLUID = EPS_AR;
      SIG_FLUID = SIG_AR;
      LJ_CUTOFF = CUT_AR;
   }
   else if(fluid == FLUID_CH4)
   {
      FLUIDMASS = CH4MASS;
      EPS_FLUID = EPS_CH4;
      SIG_FLUID = SIG_CH4;
      LJ_CUTOFF = CUT_CH4;
   }
   else if(fluid == FLUID_C2H6)
   {
      FLUIDMASS = C2H6MASS;
      EPS_FLUID = EPS_C2H6;
      SIG_FLUID = SIG_C2H6;
      FLUIDLONG = C2H6LONG;
      QDR_FLUID = QDR_C2H6;
      LJ_CUTOFF = CUT_C2H6;
   }
   else if(fluid == FLUID_N2)
   {
      FLUIDMASS = N2MASS;
      EPS_FLUID = EPS_N2;
      SIG_FLUID = SIG_N2;
      FLUIDLONG = N2LONG;
      QDR_FLUID = QDR_N2;
      LJ_CUTOFF = CUT_N2;
   }
   else if(fluid == FLUID_CO2)
   {
      FLUIDMASS = CO2MASS;
      EPS_FLUID = EPS_CO2;
      SIG_FLUID = SIG_CO2;
      FLUIDLONG = CO2LONG;
      QDR_FLUID = QDR_CO2;
      LJ_CUTOFF = CUT_CO2;
   }
   else if(fluid == FLUID_EOX)
   {
      FLUIDMASS = 2*CEOXMASS + OEOXMASS;
      LJ_CUTOFF = CUTLJEOX;
   }
   else
   {
      cout << "Unavailable fluid ID " << fluid << ".\n";
      exit(20);
   }

   if((fluid == FLUID_AR) || (fluid == FLUID_CH4))
   {
      EL_CUTOFF = LJ_CUTOFF;
   }
   else EL_CUTOFF = 1.2 * LJ_CUTOFF;

   xdr.precision(9);
   if(format == FORMAT_BRANCH)
   {
      xdr << "mardyn " << TIME << " tersoff\n"
          << "# mardyn input file, ls1 project\n"
          << "# written by animaker, the mesh generator\n";
   }
   if(format == FORMAT_BUCHHOLZ)
   {
      xdr << "mardyn trunk " << TIME << "\n";
   }

   if((format == FORMAT_BRANCH) || (format == FORMAT_BUCHHOLZ))
   {
      xdr << "T\t" << T/EPS_REF << "\n";
      xdr << "t\t0.0\nL\t" << box[0]/SIG_REF << "\t"
          << box[1]/SIG_REF << "\t" << box[2]/SIG_REF
          << "\nC\t1\n";

      if((fluid == FLUID_AR) || (fluid == FLUID_CH4))
      {
         xdr << "1 0 0 0 0\n"  // LJ, C, Q, D, Tersoff
             << "0.0 0.0 0.0\t"
             << FLUIDMASS/REFMASS << " " << EPS_FLUID/EPS_REF << " "
             << SIG_FLUID/SIG_REF;
         if(format == FORMAT_BUCHHOLZ) xdr << "\t" << LJ_CUTOFF/SIG_REF << " 0";
         xdr << "\t0.0 0.0 0.0\n";
      }
      else if(fluid == FLUID_EOX)
      {
         xdr << "3 0 0 1 0\n";  // LJ, C, Q, D, Tersoff

         xdr << R0_C1EOX/SIG_REF << " " << R1_C1EOX/SIG_REF << " "
             << R2_C1EOX/SIG_REF << "\t" << CEOXMASS/REFMASS << " "
             << EPS_CEOX/EPS_REF << " " << SIG_CEOX/SIG_REF;
         if(format == FORMAT_BUCHHOLZ) xdr << "\t" << LJ_CUTOFF/SIG_REF << " 0";
         xdr << "\n" << R0_C2EOX/SIG_REF << " " << R1_C2EOX/SIG_REF << " "
             << R2_C2EOX/SIG_REF << "\t" << CEOXMASS/REFMASS << " "
             << EPS_CEOX/EPS_REF << " " << SIG_CEOX/SIG_REF;
         if(format == FORMAT_BUCHHOLZ) xdr << "\t" << LJ_CUTOFF/SIG_REF << " 0";
         xdr << "\n" << R0_O_EOX/SIG_REF << " " << R1_O_EOX/SIG_REF << " "
             << R2_O_EOX/SIG_REF << "\t" << OEOXMASS/REFMASS << " "
             << EPS_OEOX/EPS_REF << " " << SIG_OEOX/SIG_REF;
         if(format == FORMAT_BUCHHOLZ) xdr << "\t" << LJ_CUTOFF/SIG_REF << " 0";
         xdr << "\n";

         xdr << R0DIPEOX/SIG_REF << " " << R1DIPEOX/SIG_REF << " "
             << R2DIPEOX/SIG_REF << "\t0.0 0.0 1.0 "
             << DIPOLEOX/DIP_REF;

         xdr << "\n0.0 0.0 0.0\n";
      }
      else
      {
         xdr << "2 0 1 0 0\n";  // LJ, C, Q, D, Tersoff

         xdr << "0.0 0.0 " << -0.5*FLUIDLONG/SIG_REF << "\t"
             << 0.5*FLUIDMASS/REFMASS << " " << EPS_FLUID/EPS_REF
             << " " << SIG_FLUID/SIG_REF;
         if(format == FORMAT_BUCHHOLZ) xdr << "\t" << LJ_CUTOFF/SIG_REF << " 0";
         xdr << "\n" << "0.0 0.0 " << +0.5*FLUIDLONG/SIG_REF << "\t"
             << 0.5*FLUIDMASS/REFMASS << " " << EPS_FLUID/EPS_REF
             << " " << SIG_FLUID/SIG_REF;
         if(format == FORMAT_BUCHHOLZ) xdr << "\t" << LJ_CUTOFF/SIG_REF << " 0";
         xdr << "\n";

         xdr << "0.0 0.0 0.0\t0.0 0.0 1.0\t" << QDR_FLUID/QDR_REF;

         xdr << "\n0.0 0.0 0.0\n";
      }
      xdr << "1.0e+10\n";

      xdr << "N" << "\t" << N1 << "\nM" << "\t" << "ICRVQD\n\n";

      txt.precision(6);
      txt << "mardynconfig\n# \ntimestepLength\t" << DT/REFTIME
          << "\ncutoffRadius\t" << EL_CUTOFF/SIG_REF
          << "\nLJCutoffRadius\t" << LJ_CUTOFF/SIG_REF;
   }
   if(format == FORMAT_BRANCH)
   {
      txt << "\ntersoffCutoffRadius\t"
          <<  LJ_CUTOFF/(3.0*SIG_REF);
   }
   if((format == FORMAT_BRANCH) || (format == FORMAT_BUCHHOLZ))
   {
      txt << "\ninitCanonical\t50000\n";
      if(muVT)
      {
         txt.precision(9);
         txt << "chemicalPotential " << mu/EPS_REF
             << " component 1 control 0.0 0.0 0.0 to "
             << this->box[0]/SIG_REF << " " << this->box[1]/SIG_REF
             << " " << this->box[2]/SIG_REF << " conduct "
             << 1 + (int)round(0.003 * N)
             << " tests every 3 steps\n";
	 txt << "planckConstant\t" 
	     << sqrt(6.28319 * T/EPS_REF) << "\n";  // sqrt(2 pi kT)
         txt << "initGrandCanonical\t100000\n";
      }
      txt.precision(5);
      txt << "initStatistics\t150000\n";
   }
   if(format == FORMAT_BRANCH)
   {
      txt << "phaseSpaceFile\t" << prefix << ".xdr\n";
   }
   if(format == FORMAT_BUCHHOLZ)
   {
      txt << "phaseSpaceFile\tOldStyle\t" << prefix << ".inp\n";
   }
   if((format == FORMAT_BRANCH) || (format == FORMAT_BUCHHOLZ))
   {
      txt << "datastructure\tLinkedCells\t1\noutput\t"
          << "ResultWriter\t100\t" << prefix
          << "_1R\noutput\tXyzWriter\t10000\t" << prefix
          << "_1R.buxyz\n";
   }
   txt.close();

   double I[3];
   for(int d=0; d < 3; d++) I[d] = 0.0;
   if(fluid == FLUID_EOX)
   {
      I[0] = I_XX_EOX;
      I[1] = I_YY_EOX;
      I[2] = I_ZZ_EOX;
   }
   else if(!((fluid == FLUID_AR) || (fluid == FLUID_CH4)))
   {
      I[0] = 0.25 * FLUIDMASS * FLUIDLONG * FLUIDLONG;
      I[1] = I[0];
   }

   unsigned id = 1;
   double tr[3];
   unsigned ii[3];
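   // One molecule per occupied slot: within each unit cell a site is offset by
   // 0.24 of the cell edge along its slot axis and by 0.74 along the other two,
   // plus a small random jitter; speeds are set to sqrt(3T/m) with a random
   // direction, and angular velocities are drawn from the principal moments of
   // inertia I.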
   for(ii[0]=0; ii[0] < fl_units[0]; (ii[0]) ++)
      for(ii[1]=0; ii[1] < fl_units[1]; (ii[1]) ++)
         for(ii[2]=0; ii[2] < fl_units[2]; (ii[2]) ++)
            for(int d=0; d < 3; d++)
            {
               if(fill[ ii[0] ][ ii[1] ][ ii[2] ][ d ])
               {
                  for(int k=0; k < 3; k++)
                  {
                     tr[k] = fl_unit[k] * (
                                ii[k] + 0.02*r->rnd() + ((k == d)? 0.24: 0.74)
                             );
                  }
                  for(int k=0; k < 3; k++)
                  {
                     if(tr[k] > box[k]) tr[k] -= box[k];
                     else if(tr[k] < 0.0) tr[k] += box[k];
                  }
                  double tv = sqrt(3.0*T / FLUIDMASS);
                  double phi = 6.283185 * r->rnd();
                  double omega = 6.283185 * r->rnd();
                  double w[3];
                  for(int d=0; d < 3; d++)
                     w[d] = (I[d] == 0)? 0.0: ((r->rnd() > 0.5)? 1: -1) * sqrt(2.0*r->rnd()*T / I[d]);
                  // xdr << "(" << ii[0] << "/" << ii[1] << "/" << ii[2] << "), j = " << j << ", d = " << d << ":\t";
                  if((format == FORMAT_BRANCH) || (format == FORMAT_BUCHHOLZ))
                  {
                     xdr << id << " " << 1 << "\t" << tr[0]/SIG_REF
                         << " " << tr[1]/SIG_REF << " " << tr[2]/SIG_REF
                         << "\t" << tv*cos(phi)*cos(omega)/VEL_REF << " "
                         << tv*cos(phi)*sin(omega)/VEL_REF << " "
                         << tv*sin(phi)/VEL_REF << "\t1.0 0.0 0.0 0.0\t"
                         << w[0]/REFOMGA << " " << w[1]/REFOMGA << " "
                         << w[2]/REFOMGA << "\n";
                  }
                  id++;
               }
               else xdr << "\n";
            }
   xdr << "\n";

   xdr.close();
}
Esempio n. 27
0
bool
BillboardExtension::connect(MapNode* mapNode)
{
    if ( !mapNode )
    {
        OE_WARN << LC << "Illegal: MapNode cannot be null." << std::endl;
        return false;
    }

    OE_INFO << LC << "Connecting to MapNode.\n";

    if ( !_options.imageURI().isSet() )
    {
        OE_WARN << LC << "Illegal: image URI is required" << std::endl;
        return false;
    }

    if ( !_options.featureOptions().isSet() )
    {
        OE_WARN << LC << "Illegal: feature source is required" << std::endl;
        return false;
    }
    
    _features = FeatureSourceFactory::create( _options.featureOptions().value() );
    if ( !_features.valid() )
    {
        OE_WARN << LC << "Illegal: no valid feature source provided" << std::endl;
        return false;
    }

    //if ( _features->getGeometryType() != osgEarth::Symbology::Geometry::TYPE_POINTSET )
    //{
    //    OE_WARN << LC << "Illegal: only points currently supported" << std::endl;
    //    return false;
    //}

    _features->initialize( _dbOptions );

    osg::Vec3dArray* verts = 0L;
    if ( _features->getFeatureProfile() )
    {
        verts = new osg::Vec3dArray();
        
        OE_NOTICE << "Reading features...\n";
        osg::ref_ptr<FeatureCursor> cursor = _features->createFeatureCursor();
        while ( cursor.valid() && cursor->hasMore() )
        {
            Feature* f = cursor->nextFeature();
            if ( f && f->getGeometry() )
            {
                if ( f->getGeometry()->getComponentType() == Geometry::TYPE_POLYGON )
                {
                    FilterContext cx;
                    cx.setProfile( new FeatureProfile(_features->getFeatureProfile()->getExtent()) );

                    ScatterFilter scatter;
                    scatter.setDensity( _options.density().get() );
                    scatter.setRandom( true );
                    FeatureList featureList;
                    featureList.push_back(f);
                    scatter.push( featureList, cx );
                }

                // Init a filter to transform features into the desired SRS
                if (!mapNode->getMapSRS()->isEquivalentTo(_features->getFeatureProfile()->getSRS()))
                {
                    FilterContext cx;
                    cx.setProfile( new FeatureProfile(_features->getFeatureProfile()->getExtent()) );

                    TransformFilter xform( mapNode->getMapSRS() );
                    FeatureList featureList;
                    featureList.push_back(f);
                    cx = xform.push(featureList, cx);
                }

                GeometryIterator iter(f->getGeometry());
                while(iter.hasMore()) {
                    const Geometry* geom = iter.next();
                    osg::ref_ptr<osg::Vec3dArray> fVerts = geom->createVec3dArray();
                    verts->insert(verts->end(), fVerts->begin(), fVerts->end());
                }
            }
        }
    }
    else
    {
        OE_WARN << LC << "Illegal: feature source has no SRS" << std::endl;
        return false;
    }


    if ( verts && verts->size() > 0 )
    {
        OE_NOTICE << LC << "Read " << verts->size() << " points.\n";

        //localize all the verts
        GeoPoint centroid;
        _features->getFeatureProfile()->getExtent().getCentroid(centroid);
        centroid = centroid.transform(mapNode->getMapSRS());

        OE_NOTICE << "Centroid = " << centroid.x() << ", " << centroid.y() << "\n";

        osg::Matrixd l2w, w2l;
        centroid.createLocalToWorld(l2w);
        w2l.invert(l2w);
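        // Localize the geometry: the verts are converted below from world
        // coordinates into this centroid-centered frame (small values are
        // kinder to GPU precision), and the MatrixTransform places the geode
        // back into world space.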

        osg::MatrixTransform* mt = new osg::MatrixTransform;
        mt->setMatrix(l2w);

        OE_NOTICE << "Clamping elevations...\n";
        osgEarth::ElevationQuery eq(mapNode->getMap());
        eq.setFallBackOnNoData( true );
        eq.getElevations(verts->asVector(), mapNode->getMapSRS(), true, 0.005);
        
        OE_NOTICE << "Building geometry...\n";
        osg::Vec3Array* normals = new osg::Vec3Array(verts->size());

        osg::Vec4Array* colors = new osg::Vec4Array(verts->size());
        Random rng;

        for (int i=0; i < verts->size(); i++)
        {
            GeoPoint vert(mapNode->getMapSRS(), (*verts)[i], osgEarth::ALTMODE_ABSOLUTE);

            osg::Vec3d world;
            vert.toWorld(world);
            (*verts)[i] = world * w2l;

            osg::Vec3 normal = world;
            normal.normalize();
            (*normals)[i] = osg::Matrix::transform3x3(normal, w2l);

            double n = rng.next();
            (*colors)[i].set( n, n, n, 1 );
        }

        //create geom and primitive sets
        osg::Geometry* geometry = new osg::Geometry();
        geometry->setVertexArray( verts );
        geometry->setNormalArray( normals );
        geometry->setNormalBinding( osg::Geometry::BIND_PER_VERTEX );
        geometry->setColorArray(colors);
        geometry->setColorBinding( osg::Geometry::BIND_PER_VERTEX );

        geometry->addPrimitiveSet( new osg::DrawArrays( GL_POINTS, 0, verts->size() ) );

        //create image and texture to render to
        osg::Texture2D* tex = new osg::Texture2D(_options.imageURI()->getImage(_dbOptions));
        tex->setResizeNonPowerOfTwoHint(false);
        tex->setFilter( osg::Texture::MIN_FILTER, osg::Texture::LINEAR_MIPMAP_LINEAR );
        tex->setFilter( osg::Texture::MAG_FILTER, osg::Texture::LINEAR );
        tex->setWrap(osg::Texture::WRAP_S, osg::Texture::CLAMP_TO_EDGE);
        tex->setWrap(osg::Texture::WRAP_T, osg::Texture::CLAMP_TO_EDGE);

        geometry->setName("BillboardPoints");

        osg::Geode* geode = new osg::Geode;
        geode->addDrawable(geometry);

        //osg::ref_ptr<StateSetCache> cache = new StateSetCache();
        //Registry::shaderGenerator().run(geode, cache.get());

        //set the texture related uniforms
        osg::StateSet* geode_ss = geode->getOrCreateStateSet();
        geode_ss->setTextureAttributeAndModes( 2, tex, 1 );
        geode_ss->getOrCreateUniform("billboard_tex", osg::Uniform::SAMPLER_2D)->set( 2 );

        float bbWidth = (float)tex->getImage()->s() / 2.0f;
        float bbHeight = (float)tex->getImage()->t();
        float aspect = (float)tex->getImage()->s() / (float)tex->getImage()->t();
        if (_options.height().isSet())
        {
            bbHeight = _options.height().get();
            if (!_options.width().isSet())
            {
                bbWidth = bbHeight * aspect / 2.0f;
            }
        }
        if (_options.width().isSet())
        {
            bbWidth = _options.width().get() / 2.0f;
            if (!_options.height().isSet())
            {
                bbHeight = _options.width().get() / aspect;
            }
        }

        geode_ss->getOrCreateUniform("billboard_width", osg::Uniform::FLOAT)->set( bbWidth );
        geode_ss->getOrCreateUniform("billboard_height", osg::Uniform::FLOAT)->set( bbHeight );
        geode_ss->setMode(GL_BLEND, osg::StateAttribute::ON);

        //for now just using an osg::Program
        //TODO: need to add GeometryShader support to the shader comp setup
        VirtualProgram* vp = VirtualProgram::getOrCreate(geode_ss);
        vp->setName( "osgEarth Billboard Extension" );

        ShaderPackage shaders;
        shaders.add( "Billboard geometry shader", billboardGeomShader );
        shaders.add( "Billboard fragment shader", billboardFragShader );
        shaders.loadAll( vp );

        geode_ss->setMode( GL_CULL_FACE, osg::StateAttribute::OFF );
        geode->setCullingActive(false);

        mt->addChild(geode);
        mapNode->getModelLayerGroup()->addChild(mt);

        return true;
    }

    return false;
}
Esempio n. 28
0
bool LogisticRegression::train(LabelledRegressionData trainingData){
    
    const unsigned int M = trainingData.getNumSamples();
    const unsigned int N = trainingData.getNumInputDimensions();
    const unsigned int K = trainingData.getNumTargetDimensions();
    trained = false;
    trainingResults.clear();
    
    if( M == 0 ){
        errorLog << "train(LabelledRegressionData trainingData) - Training data has zero samples!" << endl;
        return false;
    }
    
    if( K == 0 ){
        errorLog << "train(LabelledRegressionData trainingData) - The number of target dimensions is not 1!" << endl;
        return false;
    }
    
    numInputDimensions = N;
    numOutputDimensions = 1; //Logistic Regression will have 1 output
    inputVectorRanges.clear();
    targetVectorRanges.clear();
    
    //Scale the training and validation data, if needed
	if( useScaling ){
		//Find the ranges for the input data
        inputVectorRanges = trainingData.getInputRanges();
        
        //Find the ranges for the target data
		targetVectorRanges = trainingData.getTargetRanges();
        
		//Scale the training data
		trainingData.scale(inputVectorRanges,targetVectorRanges,0.0,1.0);
	}
    
    //Reset the weights
    Random rand;
    w0 = rand.getRandomNumberUniform(-0.1,0.1);
    w.resize(N);
    for(UINT j=0; j<N; j++){
        w[j] = rand.getRandomNumberUniform(-0.1,0.1);
    }

    double error = 0;
    double lastSquaredError = 0;
    double delta = 0;
    UINT iter = 0;
    bool keepTraining = true;
    Random random;
    vector< UINT > randomTrainingOrder(M);
    TrainingResult result;
    trainingResults.reserve(M);
    
    //In most cases, the training data is grouped into classes (100 samples for class 1, followed by 100 samples for class 2, etc.)
    //This can cause a problem for stochastic gradient descent algorithm. To avoid this issue, we randomly shuffle the order of the
    //training samples. This random order is then used at each epoch.
    for(UINT i=0; i<M; i++){
        randomTrainingOrder[i] = i;
    }
    std::random_shuffle(randomTrainingOrder.begin(), randomTrainingOrder.end());
    
    //Run the main stochastic gradient descent training algorithm
    while( keepTraining ){
        
        //Run one epoch of training using stochastic gradient descent
        totalSquaredTrainingError = 0;
        for(UINT m=0; m<M; m++){
            
            //Select the random sample
            UINT i = randomTrainingOrder[m];
            
            //Compute the error, given the current weights
            VectorDouble x = trainingData[i].getInputVector();
            VectorDouble y = trainingData[i].getTargetVector();
            double h = w0;
            for(UINT j=0; j<N; j++){
                h += x[j] * w[j];
            }
            error = y[0] - sigmoid( h );
            totalSquaredTrainingError += SQR(error);
            
            //Update the weights
            for(UINT j=0; j<N; j++){
                w[j] += learningRate * error * x[j];
            }
            w0 += learningRate * error;
        }
        
        //Compute the error
        delta = fabs( totalSquaredTrainingError-lastSquaredError );
        lastSquaredError = totalSquaredTrainingError;
        
        //Check to see if we should stop
        if( delta <= minChange ){
            keepTraining = false;
        }
        
        if( ++iter >= maxNumEpochs ){
            keepTraining = false;
        }
        
        if( isinf( totalSquaredTrainingError ) || isnan( totalSquaredTrainingError ) ){
            errorLog << "train(LabelledRegressionData &trainingData) - Training failed! Total squared error is NAN. If scaling is not enabled then you should try to scale your data and see if this solves the issue." << endl;
            return false;
        }
        
        //Store the training results
        rootMeanSquaredTrainingError = sqrt( totalSquaredTrainingError / double(M) );
        result.setRegressionResult(iter,totalSquaredTrainingError,rootMeanSquaredTrainingError);
        trainingResults.push_back( result );
        
        //Notify any observers of the new training data
        trainingResultsObserverManager.notifyObservers( result );
        
        trainingLog << "Epoch: " << iter << " SSE: " << totalSquaredTrainingError << " Delta: " << delta << endl;
    }
    
    //Flag that the algorithm has been trained
    regressionData.resize(1,0);
    trained = true;
    return trained;
}
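The update above relies on the logistic link sigmoid(h) = 1 / (1 + exp(-h)). A minimal sketch of that helper and of how a trained model would score a new input is given below; predict is an assumed name chosen for illustration, and the real class may expose a different interface.

#include <cmath>
#include <vector>

// Hedged sketch (illustrative only): the logistic link used in the training
// loop above and a matching prediction step. predict is an assumed name.
static double sigmoid(double h) {
    return 1.0 / (1.0 + std::exp(-h));
}

static double predict(double w0, const std::vector<double>& w,
                      const std::vector<double>& x) {
    double h = w0;
    for (size_t j = 0; j < w.size(); j++) {
        h += w[j] * x[j];   // same linear combination as in the training loop
    }
    return sigmoid(h);      // output mapped into (0, 1)
}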
Esempio n. 29
0
bool LinearRegression::train(LabelledRegressionData &trainingData){
    
    const unsigned int M = trainingData.getNumSamples();
    const unsigned int N = trainingData.getNumInputDimensions();
    const unsigned int K = trainingData.getNumTargetDimensions();
    trained = false;
    
    if( M == 0 ){
        errorLog << "train(LabelledRegressionData &trainingData) - Training data has zero samples!" << endl;
        return false;
    }
    
    if( K == 0 ){
        errorLog << "train(LabelledRegressionData &trainingData) - The number of target dimensions is not 1!" << endl;
        return false;
    }
    
    numFeatures = N;
    numOutputDimensions = 1; //Linear Regression will have 1 output
    inputVectorRanges.clear();
    targetVectorRanges.clear();
    
    //Scale the training and validation data, if needed
	if( useScaling ){
		//Find the ranges for the input data
        inputVectorRanges = trainingData.getInputRanges();
        
        //Find the ranges for the target data
		targetVectorRanges = trainingData.getTargetRanges();
        
		//Scale the training data
		trainingData.scale(inputVectorRanges,targetVectorRanges,0.0,1.0);
	}
    
    //Reset the weights
    Random rand;
    w0 = rand.getRandomNumberUniform(-0.1,0.1);
    w.resize(N);
    for(UINT j=0; j<N; j++){
        w[j] = rand.getRandomNumberUniform(-0.1,0.1);
    }

    double error = 0;
    double errorSum = 0;
    double lastErrorSum = 0;
    double delta = 0;
    UINT iter = 0;
    bool keepTraining = true;
    Random random;
    vector< UINT > randomTrainingOrder(M);
    
    //In most cases, the training data is grouped into classes (100 samples for class 1, followed by 100 samples for class 2, etc.)
    //This can cause a problem for stochastic gradient descent algorithm. To avoid this issue, we randomly shuffle the order of the
    //training samples. This random order is then used at each epoch.
    for(UINT i=0; i<M; i++){
        randomTrainingOrder[i] = i;
    }
    std::random_shuffle(randomTrainingOrder.begin(), randomTrainingOrder.end());
    
    //Run the main stochastic gradient descent training algorithm
    while( keepTraining ){
        
        //Run one epoch of training using stochastic gradient descent
        errorSum = 0;
        for(UINT m=0; m<M; m++){
            
            //Select the random sample
            UINT i = randomTrainingOrder[m];
            
            //Compute the error, given the current weights
            VectorDouble x = trainingData[i].getInputVector();
            VectorDouble y = trainingData[i].getTargetVector();
            double h = w0;
            for(UINT j=0; j<N; j++){
                h += x[j] * w[j];
            }
            error = y[0] - h; //linear model: the output is not passed through a sigmoid
            errorSum += error;
            
            //Update the weights
            for(UINT j=0; j<N; j++){
                w[j] += learningRate  * error * x[j];
            }
            w0 += learningRate * error;
        }
        
        //Compute the error
        delta = fabs( errorSum-lastErrorSum );
        lastErrorSum = errorSum;
        
        //Check to see if we should stop
        if( delta <= minChange ){
            keepTraining = false;
        }
        
        if( ++iter >= maxNumIterations ){
            keepTraining = false;
        }
        
        trainingLog << "Epoch: " << iter << " TotalError: " << errorSum << " Delta: " << delta << endl;
    }
    
    //Flag that the algorithm has been trained
    regressionData.resize(1,0);
    trained = true;
    return trained;
}
Esempio n. 30
0
double DeltaGibbs(int g,double *Delta,int Q,int G,const int *S,double c2,
		  const double *tau2R,const double *b,const double *r,
		  const double *sigma2,const double *phi,
		  const int *psi,const double *x,
		  const int *delta,const double *nu,Random &ran,int draw) {
  double pot = 0.0;

  //
  // compute prior covariance matrix
  //

  int dim = 0;
  std::vector<int> on(Q,0);
  int q;
  for (q = 0; q < Q; q++) {
    int kqg = qg2index(q,g,Q,G);
    if (delta[kqg] == 1) {
      on[q] = 1;
      dim++;
    }
  }

  if (dim > 0) {
    std::vector<std::vector<double> > var;
    makeSigma(g,G,var,on,Q,c2,tau2R,b,sigma2,r);

    //
    // define prior mean
    //

    std::vector<double> Mean(dim,0.0);
    std::vector<double> meanPrior(Mean);

    //
    // compute extra linear and quadratic terms
    //

    std::vector<double> mean(dim,0.0);

    std::vector<double> lin(dim,0.0);
    std::vector<double> quad(dim,0.0);
    int k = 0;
    for (q = 0; q < Q; q++) {
      if (on[q] == 1) {
	int kqg = qg2index(q,g,Q,G);
	double var0 = sigma2[kqg] * phi[kqg];
	double var1 = sigma2[kqg] / phi[kqg];
	int s;
	for (s = 0; s < S[q]; s++)
	  {
	    int ksq = sq2index(s,q,S,Q);
	    double variance = psi[ksq] == 0 ? var0 : var1;
	    quad[k] += 1.0 / variance;
	    int xIndex = sqg2index(s,q,g,S,Q,G);
	    lin[k] += (2.0 * psi[ksq] - 1.0) * (x[xIndex] - nu[kqg]) / variance;
	  }
	k++;
      }
    }

    //
    // Update parameters based on available observations
    //
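    //
    // With a zero-mean Gaussian prior Delta ~ N(0, V0) and a diagonal data
    // contribution (quad added to the precision, lin to the linear term), the
    // conjugate update is V1^{-1} = V0^{-1} + diag(quad) and m1 = V1 * lin,
    // which is what the code below computes.
    //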

    std::vector<std::vector<double> > varInv;
    double detPrior = inverse(var,varInv);
    std::vector<std::vector<double> > covInvPrior(varInv);
    for (k = 0; k < dim; k++) {
      Mean[k] += lin[k];
      varInv[k][k] += quad[k];
    }
    double detPosterior = 1.0 / inverse(varInv,var);
    matrixMult(var,Mean,mean);

    //
    // Draw new values
    //

    std::vector<double> vv(dim,0.0);
    if (draw == 1)
      vv = ran.MultiGaussian(var,mean);
    else {
      k = 0;
      for (q = 0; q < Q; q++) {
	if (on[q] == 1) {
	  int kqg = qg2index(q,g,Q,G);
	  vv[k] = Delta[kqg];
	  k++;
	}
      }
    }

    pot += ran.PotentialMultiGaussian(var,mean,vv);

    if (draw == 1) {
      k = 0;
      for (q = 0; q < Q; q++) {
	if (on[q] == 1) {
	  int kqg = qg2index(q,g,Q,G);
	  Delta[kqg] = vv[k];
	  k++;
	}
      }
    }
  }

  return pot;
}