double BLSSS::log_model_prob(const Selector &g)const{
    // borrowed from MLVS.cpp
    double num = vpri_->logp(g);
    if(num==BOOM::negative_infinity() || g.nvars() == 0) {
      // If num == -infinity then g is a point with zero support under
      // the prior.  If g.nvars()==0 then all coefficients are zero
      // because of the point mass.  The only entries remaining in the
      // likelihood are sums of squares of y[i] that are independent
      // of g.  They need to be omitted here because they are omitted
      // in the non-empty case below.
      return num;
    }
    SpdMatrix ivar = g.select(pri_->siginv());
    num += .5*ivar.logdet();
    if(num == BOOM::negative_infinity()) return num;

    Vector mu = g.select(pri_->mu());
    Vector ivar_mu = ivar * mu;
    num -= .5*mu.dot(ivar_mu);

    bool ok=true;
    ivar += g.select(suf().xtx());
    Matrix L = ivar.chol(ok);
    if(!ok)  return BOOM::negative_infinity();
    double denom = sum(log(L.diag()));  // = .5 log |ivar|
    Vector S = g.select(suf().xty()) + ivar_mu;
    Lsolve_inplace(L,S);
    denom -= .5*S.normsq();  // S.normsq == beta_tilde^T V_tilde^{-1} beta_tilde
    return num-denom;
  }
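For reference, the quantity being assembled here is the standard spike-and-slab
marginal log posterior.  A hedged reconstruction in my own notation (subscript g
denotes selection of the included coordinates; not taken verbatim from the source):

\[
\log p(g \mid y) \;\propto\; \log p(g)
  + \tfrac{1}{2}\log\lvert \Omega_g^{-1} \rvert
  - \tfrac{1}{2}\mu_g^\top \Omega_g^{-1} \mu_g
  - \tfrac{1}{2}\log\lvert \tilde V_g^{-1} \rvert
  + \tfrac{1}{2}\tilde\beta_g^\top \tilde V_g^{-1} \tilde\beta_g,
\qquad
\tilde V_g^{-1} = \Omega_g^{-1} + X_g^\top X_g,
\quad
\tilde\beta_g = \tilde V_g\,(\Omega_g^{-1}\mu_g + X_g^\top y).
\]

In the code, num accumulates the first three terms and denom the last two, with
S.normsq() supplying the quadratic form after the triangular solve.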
Example 2
template <class TGeomObj, class TAAPos>
bool SelectRegion(Selector& sel, const typename TAAPos::ValueType& p, TAAPos& aaPos,
                  typename Grid::traits<typename TGeomObj::side>::callback cbRegionBoundary)
{
	typedef typename Grid::traits<TGeomObj>::iterator	TIter;

	if(!sel.grid())
		return false;

	Grid& g = *sel.grid();

//	first try to find the element which contains p
	TGeomObj* startElem = NULL;
	for(TIter iter = g.begin<TGeomObj>(); iter != g.end<TGeomObj>(); ++iter){
		if(ContainsPoint(*iter, p, aaPos)){
			startElem = *iter;
			break;
		}
	}

	if(!startElem)
		return false;

	sel.clear<TGeomObj>();
	sel.select(startElem);
	SelectionFill<TGeomObj>(sel, cbRegionBoundary);

	return true;
}
Example 3
File: mvn.cpp  Project: cran/Boom
 //======================================================================
 Vector &impute_mvn(Vector &observation,
                    const Vector &mean, const SpdMatrix &variance,
                    const Selector &observed, RNG &rng) {
   if (observed.nvars() == observed.nvars_possible()) {
     return observation;
   } else if (observed.nvars() == 0) {
     observation = rmvn_mt(rng, mean, variance);
     return observation;
   }
   if (observation.size() != observed.nvars_possible()) {
     report_error("observation and observed must be the same size.");
   }
   
   // The distribution we want is N(mu, V), with
   //   V  = Sig11 - Sig12 Sig22.inv Sig21
   // and
   //   mu = mu1 + Sig12 Sig22.inv (y2 - mu2).
   // The 1's are missing, and the 2's are observed.
   Selector missing = observed.complement();
   Matrix cross_covariance = missing.select_rows(
       observed.select_cols(variance));
   SpdMatrix observed_precision = observed.select_square(variance).inv();
   Vector mu = missing.select(mean) + cross_covariance * observed_precision
       * (observed.select(observation) - observed.select(mean));
   SpdMatrix V = missing.select_square(variance)
       - sandwich(cross_covariance, observed_precision);
   Vector imputed = rmvn_mt(rng, mu, V);
   observed.fill_missing_elements(observation, imputed);
   return observation;
 }
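The comment block above encodes the standard conditional multivariate normal
identity; spelled out (a standard result, notation mine):

\[
y_1 \mid y_2 \sim N\!\left(
  \mu_1 + \Sigma_{12}\Sigma_{22}^{-1}(y_2 - \mu_2),\;
  \Sigma_{11} - \Sigma_{12}\Sigma_{22}^{-1}\Sigma_{21}
\right),
\]

where block 1 indexes the missing coordinates and block 2 the observed ones.
In the code, cross_covariance is \(\Sigma_{12}\) and observed_precision is
\(\Sigma_{22}^{-1}\).

Example 4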
  void BLSSS::draw_beta() {
    Selector g = m_->coef().inc();
    if(g.nvars() == 0) {
      m_->drop_all();
      return;
    }
    SpdMatrix ivar = g.select(pri_->siginv());
    Vector ivar_mu = ivar * g.select(pri_->mu());
    ivar += g.select(suf().xtx());
    ivar_mu += g.select(suf().xty());
    Vector b = ivar.solve(ivar_mu);
    b = rmvn_ivar_mt(rng(), b, ivar);

    // If model selection is turned off and some elements of beta
    // happen to be zero (because, e.g., of a failed MH step) we don't
    // want the dimension of beta to change.
    m_->set_included_coefficients(b, g);
  }
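The next example is a later revision of the same sampler.  It draws beta through
the upper Cholesky factor of the posterior precision rather than a dense solve
followed by rmvn_ivar_mt, so a single factorization serves both the posterior
mean computation and the random draw.

Example 5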
  void BLSSS::draw_beta() {
    Selector g = model_->coef().inc();
    if (g.nvars() == 0) {
      model_->drop_all();
      return;
    }
    SpdMatrix precision = g.select(slab_->siginv());
    Vector scaled_mean = precision * g.select(slab_->mu());
    precision += g.select(suf().xtx());
    Cholesky precision_cholesky_factor(precision);
    scaled_mean += g.select(suf().xty());
    Vector posterior_mean = precision_cholesky_factor.solve(scaled_mean);
    Vector beta = rmvn_precision_upper_cholesky_mt(
        rng(), posterior_mean, precision_cholesky_factor.getLT());

    // If model selection is turned off and some elements of beta
    // happen to be zero (because, e.g., of a failed MH step) we don't
    // want the dimension of beta to change.
    model_->set_included_coefficients(beta, g);
  }
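As a side note, drawing from N(m, Q^{-1}) given an upper triangular U with
Q = U^T U needs only one back substitution: if z ~ N(0, I) and U x = z, then
Cov(x) = U^{-1} U^{-T} = Q^{-1}.  A minimal self-contained sketch of that
standard technique (function name and types mine, not BOOM's
rmvn_precision_upper_cholesky_mt):

#include <random>
#include <vector>

// Draw x ~ N(mean, Q^{-1}) where Q = U^T U and U is upper triangular.
std::vector<double> rmvn_precision_upper_chol(
    const std::vector<double> &mean,
    const std::vector<std::vector<double>> &U,  // n x n, upper triangular
    std::mt19937 &rng) {
  const int n = mean.size();
  std::normal_distribution<double> standard_normal(0.0, 1.0);
  std::vector<double> x(n);
  for (int i = 0; i < n; ++i) x[i] = standard_normal(rng);  // x holds z
  // Back substitution: solve U * x = z in place.
  for (int i = n - 1; i >= 0; --i) {
    for (int j = i + 1; j < n; ++j) x[i] -= U[i][j] * x[j];
    x[i] /= U[i][i];
  }
  for (int i = 0; i < n; ++i) x[i] += mean[i];  // shift by the mean
  return x;
}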
Example 6
int main(int argc, const char * argv[])
{
    int port = 4567;  // Default port number
    int protocol = 1; // Default protocol number
    if (argc >= 2) port = atoi(argv[1]);
    if (argc >= 3) protocol = atoi(argv[2]);
    fprintf(stderr, "Usage %s [port number] [protocol number]\n", argv[0]);
    fprintf(stderr, "Starting server at port %d, using protocol %d\n", port, protocol);

    try {
    Selector serverSelector;
    DatagramSocket serverSocket(port);
    UDPMessageBox<FastMessage> messageBox(&serverSocket);
    IgpMessageListener listener(messageBox);

    messageBox.addListener(&listener);
    messageBox.addSessionListener(&listener);
    serverSelector.addSelectable(&serverSocket);

    IgpVirtualPeerMessageBox<FastMessage> igpMBox(listener, 1);
    PuyoIgpNatTraversal natPuncher(igpMBox, listener);
    igpMBox.addListener(&natPuncher);

    PuyoServer *responder = NULL;
    switch (protocol) {
        case 1: responder = new PuyoServerV1(igpMBox); break;
        case 2: responder = new PuyoServerV2(igpMBox); break;
        default:
            fprintf(stderr, "Error: valid protocols are 1 and 2\n");
            return 1;  // responder is never initialized for other protocols
    }
    igpMBox.addListener(responder);
        while (true) {
            serverSelector.select(10);
            try {
                messageBox.idle();
                // Timeout problem around here
                responder->idle();
                natPuncher.idle();
            }
            catch (ios_fc::Exception &e) {
                e.printMessage();
            }
        }
    }
    catch (ios_fc::Exception &e) {
        e.printMessage();
    }
    return 0;
}
Example 7
  //----------------------------------------------------------------------
  double LSB::log_model_prob(const Selector &g)const{
    double num = vs_->logp(g);
    if(num==BOOM::negative_infinity()) return num;

    Ominv = g.select(pri_->siginv());
    num += .5*Ominv.logdet();
    if(num == BOOM::negative_infinity()) return num;

    Vec mu = g.select(pri_->mu());
    Vec Ominv_mu = Ominv * mu;
    num -= .5*mu.dot(Ominv_mu);

    bool ok=true;
    iV_tilde_ = Ominv + g.select(suf()->xtx());
    Mat L = iV_tilde_.chol(ok);
    if(!ok)  return BOOM::negative_infinity();
    double denom = sum(log(L.diag()));  // = .5 log |iV_tilde_|

    Vec S = g.select(suf()->xty()) + Ominv_mu;
    Lsolve_inplace(L,S);
    denom -= .5*S.normsq();  // S.normsq == beta_tilde^T V_tilde^{-1} beta_tilde

    return num-denom;
  }
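Example 8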
 // When dimensions are small the updates are trivial, and careful
 // optimization is not necessary.
 void Marginal::low_dimensional_update(
     const Vector &observation,
     const Selector &observed,
     const SparseKalmanMatrix &transition,
     const SparseKalmanMatrix &observation_coefficient_subset) {
   set_prediction_error(
       observed.select(observation)
       - observation_coefficient_subset * state_mean());
   SpdMatrix forecast_variance =
       observed.select_square(model_->observation_variance(time_index())) +
       observation_coefficient_subset.sandwich(state_variance());
   SpdMatrix forecast_precision = forecast_variance.inv();
   set_forecast_precision_log_determinant(forecast_precision.logdet());
   set_scaled_prediction_error(forecast_precision * prediction_error());
   set_kalman_gain(transition * state_variance() *
                   observation_coefficient_subset.Tmult(forecast_precision));
 }
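Restated as the usual Kalman filter identities (matching the code above):

\[
v_t = y_t - Z_t a_t, \qquad
F_t = Z_t P_t Z_t^\top + H_t, \qquad
K_t = T_t P_t Z_t^\top F_t^{-1},
\]

with a_t = state_mean(), P_t = state_variance(), Z_t the selected observation
coefficients, H_t the observed subset of the observation variance, and T_t the
transition matrix.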
Example 9
	virtual void run() {
		Selector selector;
		socket = new DatagramSocket(1900);
		socket->setReuseAddr();
		socket->setBroadcast();
		socket->joinGroup("239.255.255.250");

		socket->registerSelector(selector);

		while (!interrupted()) {
			if (selector.select(1000) > 0) {
				char buffer[1024] = {0,};
				// Reserve one byte so the received data can be NUL terminated
				// without dropping the last received character.
				int len = socket->recv(buffer, sizeof(buffer) - 1);
				if (len > 0) {
					buffer[len] = 0;
					cout << "[RECV] >> " << buffer << endl;
				}
			} else {
				idle(10);
			}
		}

		socket->close();
	}
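Example 10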
    // Effects:
    // 4 things are set.
    // 1) prediction_error
    // 2) scaled_prediction_error
    // 3) forecast_precision_log_determinant
    // 4) kalman_gain
    void Marginal::high_dimensional_update(
        const Vector &observation,
        const Selector &observed,
        const SparseKalmanMatrix &transition,
        const SparseKalmanMatrix &observation_coefficient_subset) {
      Vector observed_subset = observed.select(observation);
      set_prediction_error(observed_subset
                           - observation_coefficient_subset * state_mean());

      // At this point the Kalman recursions compute the forecast precision Finv
      // and its log determinant.  However, we can get rid of the forecast
      // precision matrix, and replace it with the scaled error = Finv *
      // prediction_error.
      //
      // To evaluate the normal likelihood, we need the quadratic form:
      //   error * Finv * error == error.dot(scaled_error).
      // We also need the log determinant of Finv.
      //
      // The forecast_precision can be computed using a version of the binomial
      // inverse theorem:
      //
      //  (A + UBV).inv =
      //    A.inv - A.inv * U * (I + B * V * Ainv * U).inv * B * V * Ainv.
      //
      // When applied to F = H + Z P Z' the theorem gives
      //
      //   Finv = Hinv - Hinv * Z * (I + P Z' Hinv Z).inv * P * Z' * Hinv
      //
      // This helps because H is diagonal.  The only matrix that needs to be
      // inverted is (I + PZ'HinvZ), which is a state x state matrix.
      // 
      // We don't compute Finv directly, we compute Finv * prediction_error.

      // observation_precision refers to the precision of the observed subset.
      DiagonalMatrix observation_precision(observed.select(
          1.0 / model_->observation_variance(time_index()).diag()));

      // Set inner_matrix to I + P * Z' * Hinv * Z
      SpdMatrix ZTZ = observation_coefficient_subset.inner(
          observation_precision.diag());
      // Note: the product of two SPD matrices need not be symmetric.
      Matrix inner_matrix = state_variance() * ZTZ;
      inner_matrix.diag() += 1.0;
      LU inner_lu(inner_matrix);
      
      // inner_inv_P is inner.inv() * state_variance.  This matrix need not be
      // symmetric.
      Matrix inner_inv_P = inner_lu.solve(state_variance());
      
      Matrix HinvZ = observation_precision *
          observation_coefficient_subset.dense();
      set_scaled_prediction_error(
          observation_precision * prediction_error()
          - HinvZ * inner_inv_P * HinvZ.Tmult(prediction_error()));
      
      // The log determinant of F.inverse is the negative log of det(H + ZPZ').
      // That determinant can be computed using the "matrix determinant lemma,"
      // which says det(A + UV') = det(I + V' * A.inv * U) * det(A)
      //
      // https://en.wikipedia.org/wiki/Matrix_determinant_lemma#Generalization
      //
      // With F = H + ZPZ', setting A = H, U = Z, V' = PZ' gives
      //   det(F) = det(I + PZ' * Hinv * Z) * det(H).
      set_forecast_precision_log_determinant(
          observation_precision.logdet() - inner_lu.logdet());

      // The Kalman gain is:  K = T * P * Z' * Finv.
      // Substituting the expression for Finv from above gives
      //
      // K = T * P * Z' *
      //   (Hinv - Hinv * Z * (I + P Z' Hinv Z).inv * P * Z' * Hinv)
      Matrix ZtHinv = HinvZ.transpose();
      set_kalman_gain(transition * state_variance() *
                      (ZtHinv - ZTZ * inner_inv_P * ZtHinv));
    }
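In clean notation, the two identities the comments derive are

\[
F^{-1} = H^{-1} - H^{-1} Z \left(I + P Z^\top H^{-1} Z\right)^{-1} P Z^\top H^{-1},
\qquad
\log\det F^{-1} = \log\det H^{-1} - \log\det\!\left(I + P Z^\top H^{-1} Z\right),
\]

so the only dense inversion required is of the state x state matrix
I + P Z' Hinv Z, handled above by the LU decomposition.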
Example 11
int main(int argc, char *args[])
{
  int n = 8;
  cerr << "argc:"<<argc<<endl;
  if(argc!=n){
    hyperdatasetUsage(args[0]);
    exit(1);
  }

  int rands = 0;
  rands = atoi(args[1]);
  if(rands!=0)
    srand(rands);
  else{
    rands = time(0);
    srand(rands);
    cout << "ny seed: " << rands << endl;
  }

  //1. load the example file
  NEATsettings * s = new NEATsettings();
  ifstream ifs(args[2],ios::in);
  ifs>>s;
  ifs.close();  

  //2. generate the pop
  TransferFunctions * tfs = new TransferFunctions(s);
  Population * pop = makePopulation(args[3],s,tfs);
  
  //3. then the selector
  Selector * sel = makeSelector(args[4]);

  int g = atoi(args[5]);
  int iter = atoi(args[6]);

  string dfile = args[7];
  DataSet * set = new DataSet(false,dfile,0.0);
  FitnessEvaluator * de = new DatasetHyperNEAT(s,tfs,set);
  Evaluator * ev = new Evaluator(de); 
  LocalReproducer * rp = new LocalReproducer();
  int gc = 0; double sum=0; double sum2=0; double sum3=0;
  Phenotype * cbest = NULL;
  Phenotype * sbest = NULL;
  Phenotype * best = NULL;
  int osize=pop->getMembers()->size();
  int ss=0;
  double ocomp=0;
  time_t startt;
  double timesum = 0;
  for(int i2=0;i2<iter;i2++){
    gc = 0;
    best = NULL;
    startt = time(0);
    for(int i=0;i<g;i++){
      gc++;
//       cout << "before eval.." << endl;
      ev->evaluate(pop->getMembers(),pop->getMembers()->size());
//       cout << "after eval.." << endl;
      if((unsigned int)pop->getOriginalSize()!=pop->getMembers()->size())
 	cout << "size not right after eval.." << endl;
      ss=pop->getSpecies()->size();
//       cout << "before pop juggling.." << endl;
      pop->updateSpeciesStats();
      pop->sortmembers();
      pop->sortspecies();
      cbest = pop->getCopyOfCurrentBest();
      de->f(cbest);
//       cout << "before best juggling.." << endl;
      if(best==NULL){
	best = cbest;
      }else if(cbest->getFitness()>best->getFitness()){
	delete best;
	best = cbest;
      }else{
	delete cbest;
      }
//       cout << "after best juggling.." << endl;
      cout << "gen: "<<i<<" best: " << best->getFitness() << "worst: " << pop->getMembers()->at(pop->getMembers()->size()-1)->getFitness() << endl;
      if(((DatasetHyperNEAT*)de)->done(best)){
	i = g;
// 	cout << best;
	((DatasetHyperNEAT*)de)->runTest(best);
      }
//       cout << "after xor testing.." << endl;
      sel->select(pop,0);
      rp->reproduce(pop);
//       cout << "after select and reproduce.." << endl;
    }
    if(sbest==NULL)
      sbest = best;
    else if(best->getFitness()>sbest->getFitness())
      sbest = best;
    time_t ft = time(0)-startt;
    timesum += ft;
    cout << "run: "<<i2+1<<" gc: "<<gc<<" maxfitness: " << pop->getHighestFitness() 
	 << " sbest fitness: " << sbest->getFitness() << " species: " << ss 
	 << " time: " << ft << " time/gen: " << (double)ft/(double)gc << endl;
    sum += gc;
    sum2 += best->getGenome()->extrons();
    sum3 += best->getGenome()->countHidden();

    if(i2 != iter-1){  // don't rebuild the population after the final run
      if(pop->getOriginalSeed()!=NULL){
	Genome * oseed = pop->getOriginalSeed();
	int oelitism = pop->getOriginalInitialElitism();
	ocomp = pop->getOcomp();
	delete pop;
	pop = new Population(s,ocomp,tfs);
	//      oseed->setTfs(pop->getTfs());
	pop->genesis(oseed,osize,oelitism);
      }else{
	pop->resetSpawn();
      }	
    }

  }
  ((DatasetHyperNEAT*)de)->runTest(sbest);
  cout << ((DatasetHyperNEAT*)de)->output(sbest);
}
Example 12
 Vector NeRegSuf::xty(const Selector &inc)const{
   return inc.select(xty_);}
Example 13
 SpdMatrix NeRegSuf::xtx(const Selector &inc)const{
   reflect();
   return inc.select(xtx_);
 }
Example 14
 Vector QrRegSuf::xty(const Selector &inc)const{
   //    if(!current) refresh_qr();
   return inc.select(xty());
 }
Example 15
 SpdMatrix QrRegSuf::xtx(const Selector &inc)const{
   //    if(!current) refresh_qr();
   return inc.select(xtx());
 }