Example No. 1
void PanoramaTracker::run() {
  while (isRunning() && m_scaled.size() <= MAX_TRACKER_FRAMES) {
    QScopedPointer<QtCamGstSample> sample(m_input->sample());

    if (!sample) {
      continue;
    }

    if (!Tracker::isInitialized()) {
      QSize size = QSize(sample->width(), sample->height());
      m_width = size.width() > 720 ? size.width() / 8 : size.width() / 4;
      m_height = size.width() > 720 ? size.height() / 8 : size.height() / 4;
      m_inputSize = size;

      // TODO: This should be 5.0 but we fail to stitch sometimes if we set it to 5
      if (!Tracker::initialize(m_width, m_height, 2.0f)) {
	emit error(Panorama::ErrorTrackerInit);
	return;
      }
    }

    // Now we can process the sample:
    const guint8 *src = sample->data();

    QScopedArrayPointer<guint8>
      dst(new guint8[m_inputSize.width() * m_inputSize.height() * 3 / 2]);
    enum libyuv::FourCC fmt;

    switch (sample->format()) {
    case GST_VIDEO_FORMAT_UYVY:
      fmt = libyuv::FOURCC_UYVY;
      break;
    default:
      qCritical() << "Unsupported color format";
      emit error(Panorama::ErrorTrackerFormat);
      return;
    }

    guint8 *y = dst.data(),
      *u = y + m_inputSize.width() * m_inputSize.height(),
      *v = u + m_inputSize.width()/2 * m_inputSize.height()/2;

    if (ConvertToI420(src, sample->size(),
		      y, m_inputSize.width(),
		      u, m_inputSize.width() / 2,
		      v, m_inputSize.width() / 2,
		      0, 0,
		      m_inputSize.width(), m_inputSize.height(),
		      m_inputSize.width(), m_inputSize.height(),
		      libyuv::kRotate0, fmt) != 0) {
      emit error(Panorama::ErrorTrackerConvert);
      return;
    }

    QScopedArrayPointer<guint8> scaled(new guint8[m_width * m_height * 3 / 2]);
    guint8 *ys = scaled.data(),
      *us = ys + m_width * m_height,
      *vs = us + m_width/2 * m_height/2;

    // Now scale:
    // No need for error checking because the function always returns 0
    libyuv::I420Scale(y, m_inputSize.width(),
		      u, m_inputSize.width()/2,
		      v, m_inputSize.width()/2,
		      m_inputSize.width(), m_inputSize.height(),
		      ys, m_width,
		      us, m_width/2,
		      vs, m_width/2,
		      m_width, m_height,
		      libyuv::kFilterBilinear);

    int err = addFrame(scaled.data());

    if (err >= 0) {
      m_scaled.push_back(scaled.take());
      m_frames.push_back(dst.take());
      emit frameCountChanged();
    }
  }
}
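The plane pointers y, u, and v above rely on the I420 layout that libyuv's ConvertToI420 writes: a full-resolution Y plane followed by quarter-resolution U and V planes, which is also where the width * height * 3 / 2 buffer size comes from. A minimal standalone C++ sketch of that arithmetic (not part of the original tracker):

#include <cstddef>

// Byte sizes of the three planes of a contiguous I420 buffer.
// Assumes even width and height, as the tracker's sizes are.
struct I420Layout {
  std::size_t ySize, uSize, vSize, total;
};

I420Layout i420Layout(int width, int height) {
  I420Layout l;
  l.ySize = static_cast<std::size_t>(width) * height; // full-resolution luma
  l.uSize = l.ySize / 4;                              // 2x2-subsampled chroma
  l.vSize = l.uSize;
  l.total = l.ySize + l.uSize + l.vSize;              // == width * height * 3 / 2
  return l;
}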
Example No. 2
kernel void sample_z(global int *cur_y,
		     global int *cur_z,
		     global int *cur_r,
		     global int *z_by_ry,
		     global int *z_col_sum,
		     global int *obs,
		     global float *rand, 
		     uint N, uint D, uint K, uint f_img_width,
		     float lambda, float epislon, float theta) {
  
  const uint V_SCALE = 0, H_SCALE = 1, V_TRANS = 2, H_TRANS = 3, NUM_TRANS = 4;
  uint h, w, new_index; // variables used in the for loop

  uint nth = get_global_id(0); // n is the index of data
  uint kth = get_global_id(1); // k is the index of features

  uint f_img_height = D / f_img_width;

  // calculate the prior probability that each cell is 1
  float on_prob_temp = (z_col_sum[kth] - cur_z[nth * K + kth]) / (float)N; 
  float off_prob_temp = 1 - (z_col_sum[kth] - cur_z[nth * K + kth]) / (float)N;

  // retrieve the transformation applied to this feature by this object
  int v_scale = cur_r[nth * (K * NUM_TRANS) + kth * NUM_TRANS + V_SCALE];
  int h_scale = cur_r[nth * (K * NUM_TRANS) + kth * NUM_TRANS + H_SCALE];
  int v_dist = cur_r[nth * (K * NUM_TRANS) + kth * NUM_TRANS + V_TRANS];
  int h_dist = cur_r[nth * (K * NUM_TRANS) + kth * NUM_TRANS + H_TRANS];
  int new_height = f_img_height + v_scale, new_width = f_img_width + h_scale;
  
  uint d, hh, ww;
  // extremely hackish way to calculate the likelihood
  for (d = 0; d < D; d++) {
    // if the kth feature can turn on a pixel at d
    if (cur_y[kth * D + d] == 1) {
      // unpack d into h and w and get new index
      h = d / f_img_width;
      w = d % f_img_width;

      for (hh = 0; hh < f_img_height; hh++) {
	for (ww = 0; ww < f_img_width; ww++) {
	  if ((int)round((float)hh / new_height * f_img_height) == h &&
	      (int)round((float)ww / new_width * f_img_width) == w) {
	    new_index = ((v_dist + hh) % f_img_height) * f_img_width + (h_dist + ww) % f_img_width;
      
	    // then the corresponding observed pixel is at new_index
	    // so, if the observed pixel at new_index is on
	    if (obs[nth * D + new_index] == 1) {
	      // if the nth object previously has the kth feature
	      if (cur_z[nth * K + kth] == 1) {
		on_prob_temp *= 1 - pow(1 - lambda, z_by_ry[nth * D + new_index]) * (1 - epislon);
		off_prob_temp *= 1 - pow(1 - lambda, z_by_ry[nth * D + new_index] - 1) * (1 - epislon);
	      } else {
		on_prob_temp *= 1 - pow(1 - lambda, z_by_ry[nth * D + new_index] + 1) * (1 - epislon);
		off_prob_temp *= 1 - pow(1 - lambda, z_by_ry[nth * D + new_index]) * (1 - epislon);
	      }
	    } else {
	      on_prob_temp *= 1 - lambda;
	      off_prob_temp *= 1.0f;
	    }
	  } 
	}
      }
    }
  }
  
  //printf("index: %d post_on: %f post_off: %f\n", nth * K + kth, on_prob_temp, off_prob_temp);
  float post[2] = {on_prob_temp, off_prob_temp};
  uint labels[2] = {1, 0};
  pnormalize(post, 0, 2);
  //printf("before index: %d %f %f %d \n", nth * K + kth, post[0], post[1], cur_z[nth * K + kth]);
  cur_z[nth * K + kth] = sample(2, labels, post, 0, rand[nth * K + kth]);
  //printf("after index: %d %f %f %d \n", nth * K + kth, post[0], post[1], cur_z[nth * K + kth]);
}
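The kernel above finishes by calling two device helpers, pnormalize and sample, whose definitions are not shown in this example. As an illustration only (the real OpenCL helpers may differ in signature and detail), the equivalent host-side C++ logic normalizes the two unnormalized posterior weights and then picks a label by an inverse-CDF lookup against the precomputed uniform draw:

#include <cstddef>

// Normalize probs[start, end) in place so the entries sum to 1.
// Hypothetical stand-in for the kernel's pnormalize(post, 0, 2).
void pnormalize_host(float *probs, std::size_t start, std::size_t end) {
  float sum = 0.0f;
  for (std::size_t i = start; i < end; ++i) sum += probs[i];
  for (std::size_t i = start; i < end; ++i) probs[i] /= sum;
}

// Return labels[i] for the first i whose cumulative probability exceeds
// the uniform draw u in [0, 1).  Hypothetical stand-in for the kernel's
// sample(2, labels, post, 0, rand[nth * K + kth]).
int sample_host(std::size_t n, const unsigned *labels, const float *probs,
                std::size_t start, float u) {
  float cum = 0.0f;
  for (std::size_t i = 0; i < n; ++i) {
    cum += probs[start + i];
    if (u < cum) return (int)labels[i];
  }
  return (int)labels[n - 1]; // guard against floating-point rounding
}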
Example No. 3
void Mesh2Cloud::addProperties() {
	auto group = gui()->properties()->add<Section>("Sampling", "group");
	
	auto samples = group->add<Number>("Samples Per Square Unit", "samples");
	samples->setDigits(0);
	samples->setMin(1);
	samples->setMax(100000);
	samples->setValue(100);
	group->add<Button>("Sample", "sample")->setCallback([&] () {
		auto ns = gui()->properties()->get<Number>({"group", "samples"})->value();
		sample(ns);
	});

	auto iogroup = gui()->properties()->add<Section>("Input/Output", "iogroup");
	
	auto outFile = iogroup->add<File>("Save to: ", "outFile");
	outFile->setMode(File::SAVE);
	outFile->setCallback([&] (fs::path p) {
		pcl::io::savePCDFileBinary(p.string(), *m_cloud);
		gui()->log()->info("Saved pointcloud to: \""+p.string()+"\"");
	});
	outFile->disable();
}
Example No. 4
nsresult
EMEH264Decoder::GmpInput(MP4Sample* aSample)
{
  MOZ_ASSERT(IsOnGMPThread());

  nsAutoPtr<MP4Sample> sample(aSample);
  if (!mGMP) {
    mCallback->Error();
    return NS_ERROR_FAILURE;
  }

  if (sample->crypto.valid) {
    CDMCaps::AutoLock caps(mProxy->Capabilites());
    MOZ_ASSERT(caps.CanDecryptAndDecodeVideo());
    const auto& keyid = sample->crypto.key;
    if (!caps.IsKeyUsable(keyid)) {
      nsRefPtr<nsIRunnable> task(new DeliverSample(this, sample.forget()));
      caps.CallWhenKeyUsable(keyid, task, mGMPThread);
      return NS_OK;
    }
  }


  mLastStreamOffset = sample->byte_offset;

  GMPVideoFrame* ftmp = nullptr;
  GMPErr err = mHost->CreateFrame(kGMPEncodedVideoFrame, &ftmp);
  if (GMP_FAILED(err)) {
    mCallback->Error();
    return NS_ERROR_FAILURE;
  }

  gmp::GMPVideoEncodedFrameImpl* frame = static_cast<gmp::GMPVideoEncodedFrameImpl*>(ftmp);
  err = frame->CreateEmptyFrame(sample->size);
  if (GMP_FAILED(err)) {
    mCallback->Error();
    return NS_ERROR_FAILURE;
  }

  memcpy(frame->Buffer(), sample->data, frame->Size());

  frame->SetEncodedWidth(mConfig.display_width);
  frame->SetEncodedHeight(mConfig.display_height);
  frame->SetTimeStamp(sample->composition_timestamp);
  frame->SetCompleteFrame(true);
  frame->SetDuration(sample->duration);
  if (sample->crypto.valid) {
    frame->InitCrypto(sample->crypto);
  }
  frame->SetFrameType(sample->is_sync_point ? kGMPKeyFrame : kGMPDeltaFrame);
  frame->SetBufferType(GMP_BufferLength32);

  nsTArray<uint8_t> info; // No codec specific per-frame info to pass.
  nsresult rv = mGMP->Decode(frame, false, info, 0);
  if (NS_FAILED(rv)) {
    mCallback->Error();
    return rv;
  }

  return NS_OK;
}
Example No. 5
	// Sample a hidden neuron, given a visible neuron vector
	double sample_hidden(rbm* r, unsigned int j, std::vector<int> visible) {
	  return sample(hidden_probability(r, j, visible));
	}
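This wrapper only makes sense together with hidden_probability and sample, which are defined elsewhere in the project. For a standard binary RBM, hidden_probability is the logistic sigmoid of the hidden unit's bias plus its weighted visible inputs, and sample is a Bernoulli draw on that probability. A self-contained sketch with an assumed rbm layout (field names are illustrative, not the original's):

#include <cmath>
#include <cstddef>
#include <cstdlib>
#include <vector>

// Illustrative RBM layout; the original struct's fields may differ.
struct rbm {
  std::vector<double> hidden_bias;          // b_j
  std::vector<std::vector<double>> weights; // W[j][i]
};

// P(h_j = 1 | v) = sigmoid(b_j + sum_i W[j][i] * v_i)
double hidden_probability(const rbm *r, unsigned int j,
                          const std::vector<int> &visible) {
  double act = r->hidden_bias[j];
  for (std::size_t i = 0; i < visible.size(); ++i)
    act += r->weights[j][i] * visible[i];
  return 1.0 / (1.0 + std::exp(-act));
}

// Bernoulli draw: 1 with probability p, otherwise 0.
double sample(double p) {
  return (std::rand() / (RAND_MAX + 1.0)) < p ? 1.0 : 0.0;
}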
Example No. 6
void Shape::sample(const core::Vec3 &ps, float u1, float u2, float u3, int *primID, core::Vec3 *p, core::Vec3 *n) const
{
    return sample(u1, u2, u3, primID, p, n);
}
Example No. 7
bool ClassificationData::loadDatasetFromFile(const std::string &filename){

	std::fstream file;
	file.open(filename.c_str(), std::ios::in);
	UINT numClasses = 0;
	clear();

	if( !file.is_open() ){
        errorLog << "loadDatasetFromFile(const std::string &filename) - could not open file!" << std::endl;
		return false;
	}

	std::string word;

	//Check to make sure this is a file with the Training File Format
	file >> word;
	if(word != "GRT_LABELLED_CLASSIFICATION_DATA_FILE_V1.0"){
        errorLog << "loadDatasetFromFile(const std::string &filename) - could not find file header!" << std::endl;
		file.close();
		return false;
	}

    //Get the name of the dataset
	file >> word;
	if(word != "DatasetName:"){
        errorLog << "loadDatasetFromFile(const std::string &filename) - failed to find DatasetName header!" << std::endl;
        errorLog << word << std::endl;
		file.close();
		return false;
	}
	file >> datasetName;

    file >> word;
	if(word != "InfoText:"){
        errorLog << "loadDatasetFromFile(const std::string &filename) - failed to find InfoText header!" << std::endl;
		file.close();
		return false;
	}

    //Load the info text
    file >> word;
    infoText = "";
    while( word != "NumDimensions:" ){
        infoText += word + " ";
        file >> word;
    }

	//Get the number of dimensions in the training data
	if( word != "NumDimensions:" ){
        errorLog << "loadDatasetFromFile(const std::string &filename) - failed to find NumDimensions header!" << std::endl;
		file.close();
		return false;
	}
	file >> numDimensions;

	//Get the total number of training examples in the training data
	file >> word;
	if( word != "TotalNumTrainingExamples:" && word != "TotalNumExamples:" ){
        errorLog << "loadDatasetFromFile(const std::string &filename) - failed to find TotalNumTrainingExamples header!" << std::endl;
		file.close();
		return false;
	}
	file >> totalNumSamples;

	//Get the total number of classes in the training data
	file >> word;
	if(word != "NumberOfClasses:"){
        errorLog << "loadDatasetFromFile(string filename) - failed to find NumberOfClasses header!" << std::endl;
		file.close();
		return false;
	}
	file >> numClasses;

	//Resize the class counter buffer and load the counters
	classTracker.resize(numClasses);

	//Get the total number of classes in the training data
	file >> word;
	if(word != "ClassIDsAndCounters:"){
        errorLog << "loadDatasetFromFile(const std::string &filename) - failed to find ClassIDsAndCounters header!" << std::endl;
		file.close();
		return false;
	}

	for(UINT i=0; i<classTracker.getSize(); i++){
		file >> classTracker[i].classLabel;
		file >> classTracker[i].counter;
        file >> classTracker[i].className;
	}

    //Check if the dataset should be scaled using external ranges
	file >> word;
	if(word != "UseExternalRanges:"){
        errorLog << "loadDatasetFromFile(const std::string &filename) - failed to find UseExternalRanges header!" << std::endl;
		file.close();
		return false;
	}
    file >> useExternalRanges;

    //If we are using external ranges then load them
    if( useExternalRanges ){
        externalRanges.resize(numDimensions);
        for(UINT i=0; i<externalRanges.getSize(); i++){
            file >> externalRanges[i].minValue;
            file >> externalRanges[i].maxValue;
        }
    }

	//Get the main training data
	file >> word;
	if( word != "LabelledTrainingData:" && word != "Data:"){
        errorLog << "loadDatasetFromFile(const std::string &filename) - failed to find LabelledTrainingData header!" << std::endl;
		file.close();
		return false;
	}

	ClassificationSample tempSample( numDimensions );
	data.resize( totalNumSamples, tempSample );

	for(UINT i=0; i<totalNumSamples; i++){
        UINT classLabel = 0;
        VectorFloat sample(numDimensions,0);
		file >> classLabel;
		for(UINT j=0; j<numDimensions; j++){
			file >> sample[j];
		}
        data[i].set(classLabel, sample);
	}

	file.close();
	
    //Sort the class labels
    sortClassLabels();
	
	return true;
}
Example No. 8
int main() {
  srand(time(NULL));
  sample(10000000, 1000000);
}
Example No. 9
    virtual void process()
    {
        ActPrintLog("GlobalMergeActivityMaster::process");
        CMasterActivity::process();     
        IHThorMergeArg *helper = (IHThorMergeArg *)queryHelper();   
        Owned<IThorRowInterfaces> rowif = createRowInterfaces(helper->queryOutputMeta());
        CThorKeyArray sample(*this, rowif,helper->querySerialize(),helper->queryCompare(),helper->queryCompareKey(),helper->queryCompareRowKey());

        unsigned n = container.queryJob().querySlaves();
        mptag_t *replytags = new mptag_t[n];
        mptag_t *intertags = new mptag_t[n];
        unsigned i;
        for (i=0;i<n;i++) {
            replytags[i] = TAG_NULL;
            intertags[i] = TAG_NULL;
        }
        try {
            for (i=0;i<n;i++) {
                if (abortSoon) {
                    delete [] replytags;
                    delete [] intertags;
                    return;
                }
                CMessageBuffer mb;
#ifdef _TRACE
                ActPrintLog("Merge process, Receiving on tag %d",replyTag);
#endif
                rank_t sender;
                if (!receiveMsg(mb, RANK_ALL, replyTag, &sender)||abortSoon) {
                    delete [] replytags;
                    delete [] intertags;
                    return;
                }
#ifdef _TRACE
                ActPrintLog("Merge process, Received sample from %d",sender);
#endif
                sender--;
                assertex((unsigned)sender<n);
                assertex(replytags[(unsigned)sender]==TAG_NULL);
                deserializeMPtag(mb,replytags[(unsigned)sender]);
                deserializeMPtag(mb,intertags[(unsigned)sender]);
                sample.deserialize(mb,true);
            }
            ActPrintLog("GlobalMergeActivityMaster::process samples merged");
            sample.createSortedPartition(n);
            ActPrintLog("GlobalMergeActivityMaster::process partition generated");
            for (i=0;i<n;i++) {
                if (abortSoon)
                    break;
                CMessageBuffer mb;
                mb.append(n);
                for (unsigned j = 0;j<n;j++)
                    serializeMPtag(mb,intertags[j]);
                sample.serialize(mb);
#ifdef _TRACE
                ActPrintLog("Merge process, Replying to node %d tag %d",i+1,replytags[i]);
#endif
                if (!queryJobChannel().queryJobComm().send(mb, (rank_t)i+1, replytags[i]))
                    break;
            }
        
        }
        catch (IException *e) {
            delete [] replytags;
            delete [] intertags;
            ActPrintLog(e, "MERGE");
            throw;
        }
        delete [] replytags;
        delete [] intertags;
        ActPrintLog("GlobalMergeActivityMaster::process exit");
    }
Example No. 10
double UniformSample(double max) {
    boost::uniform_real<> dist(0, max);
    boost::variate_generator<boost::mt19937&, boost::uniform_real<> > sample(
            gen, dist);
    return sample();
}
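The variate generator binds a shared Mersenne Twister engine, gen, by reference, so gen has to outlive every call; here it is presumably a file-scope or member engine. A minimal usage sketch assuming a file-scope engine (the original's gen may be declared and seeded differently):

#include <boost/random/mersenne_twister.hpp>
#include <boost/random/uniform_real.hpp>
#include <boost/random/variate_generator.hpp>
#include <iostream>

static boost::mt19937 gen(42u); // assumed shared engine; the seed is arbitrary here

double UniformSample(double max) {
    boost::uniform_real<> dist(0, max);
    boost::variate_generator<boost::mt19937&, boost::uniform_real<> > sample(
            gen, dist);
    return sample(); // one draw from [0, max)
}

int main() {
    for (int i = 0; i < 3; ++i)
        std::cout << UniformSample(10.0) << "\n";
}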
Example No. 11
double DeltaStarGibbs(const vector<int> &oldClique,
		      const vector<vector<int> > &oldComponents,
		      int Q, int G, const int *S,
		      double *Delta,const double *r,
		      const double *sigma2,const double *phi,
		      const double *tau2R,const double *b,
		      const double *nu,const int *delta,
		      const int *psi,const double *x,
		      const vector<vector<vector<double> > > &Omega,
		      Random &ran,int draw) {

  // compute prior precision matrix

  //  cout << "start computing prior precision matrix" << endl;

  vector<vector<vector<double> > > OmegaInv(Omega);
  vector<double> OmegaDet(Omega.size(),0.0);
  int k;
  for (k = 0; k < OmegaInv.size(); k++)
    OmegaDet[k] = inverse(Omega[k],OmegaInv[k]);

  vector<vector<vector<double> > > OmegaSep;
  OmegaSep.resize(Omega.size());
  for (k = 0; k < OmegaSep.size(); k++) {
    OmegaSep[k].resize(oldComponents[k].size());
    int i;
    for (i = 0; i < oldComponents[k].size(); i++) {
      OmegaSep[k][i].resize(oldComponents[k].size());
      int j;
      for (j = 0; j < oldComponents[k].size(); j++) {
	OmegaSep[k][i][j] = Omega[oldClique[k]][oldComponents[k][i]][oldComponents[k][j]];
      }
    }
  }

  vector<vector<vector<double> > > OmegaSepInv(OmegaSep);
  vector<double> OmegaSepDet(Omega.size(),0.0);
  for (k = 0; k < OmegaSep.size(); k++) {
    if (OmegaSep[k].size() > 0)
      OmegaSepDet[k] = inverse(OmegaSep[k],OmegaSepInv[k]);
  }

  vector<map<int,double> > OmegaInvSparse;
  OmegaInvSparse.resize(G);
  int g;
  for (g = 0; g < G; g++)
    OmegaInvSparse[g].clear();

  vector<vector<int> > nr;
  nr.resize(Omega.size());
  g = 0;
  for (k = 0; k < Omega.size(); k++) {

    nr[k].resize(Omega[k].size());
    int gg;
    for (gg = 0; gg < oldComponents[k].size(); gg++)
      nr[k][gg] = nr[oldClique[k]][oldComponents[k][gg]];
    for (gg = oldComponents[k].size(); gg < Omega[k].size(); gg++) {
      nr[k][gg] = g;
      g++;
    }
  }

  // print to files
  /*
  for (k = 0; k < Omega.size(); k++) {
    char filename[120];
    sprintf(filename,"Omega-%d.txt",k);
    FILE *out = fopen(filename,"w");
    int i,j;
    for (i = 0; i < Omega[k].size(); i++) {
      for (j = 0; j < Omega[k][i].size(); j++)
	fprintf(out,"%20.18e ",Omega[k][i][j]);
      fprintf(out,"\n");
    }
    fclose(out);
  }

  for (k = 0; k < OmegaInv.size(); k++) {
    char filename[120];
    sprintf(filename,"OmegaInv-%d.txt",k);
    FILE *out = fopen(filename,"w");
    int i,j;
    for (i = 0; i < OmegaInv[k].size(); i++) {
      for (j = 0; j < OmegaInv[k][i].size(); j++)
	fprintf(out,"%20.18e ",OmegaInv[k][i][j]);
      fprintf(out,"\n");
    }
    fclose(out);
  }

  for (k = 1; k < OmegaSep.size(); k++) {
    char filename[120];
    sprintf(filename,"OmegaSep-%d.txt",k);
    FILE *out = fopen(filename,"w");
    int i,j;
    for (i = 0; i < OmegaSep[k].size(); i++) {
      for (j = 0; j < OmegaSep[k][i].size(); j++)
	fprintf(out,"%20.18e ",OmegaSep[k][i][j]);
      fprintf(out,"\n");
    }
    fclose(out);
  }

  for (k = 1; k < OmegaSepInv.size(); k++) {
    char filename[120];
    sprintf(filename,"OmegaSepInv-%d.txt",k);
    FILE *out = fopen(filename,"w");
    int i,j;
    for (i = 0; i < OmegaSepInv[k].size(); i++) {
      for (j = 0; j < OmegaSepInv[k][i].size(); j++)
	fprintf(out,"%20.18e ",OmegaSepInv[k][i][j]);
      fprintf(out,"\n");
    }
    fclose(out);
  }
  */
  // finished printing to files


  // establish and print large Omega
  /*
  vector<vector<double> > OmegaTotal;
  OmegaTotal.resize(G);
  for (g = 0; g < G; g++) {
    OmegaTotal[g].resize(G);
    int gg;
    for (gg = 0; gg < G; gg++)
      OmegaTotal[g][gg] = 0.0;
  }

  for (k = 0; k < OmegaInv.size(); k++) {
    int r;
    for (r = 0; r < nr[k].size(); r++) {
      int g = nr[k][r];
      int s;
      for (s = 0; s <= r; s++) {
	int gg = nr[k][s];
	double value = Omega[k][r][s];
	OmegaTotal[g][gg] = value;
	OmegaTotal[gg][g] = value;
      }
    }
  }

  {
    char filename[120];
    sprintf(filename,"OmegaTotal.txt");
    FILE *out = fopen(filename,"w");
    int i,j;
    for (i = 0; i < OmegaTotal.size(); i++) {
      for (j = 0; j < OmegaTotal[i].size(); j++)
	fprintf(out,"%20.18e ",OmegaTotal[i][j]);
      fprintf(out,"\n");
    }
    fclose(out);
  }
  */
  // finished printing files

  for (k = 0; k < OmegaInv.size(); k++) {
    int r;
    for (r = 0; r < nr[k].size(); r++) {
      int s;
      for (s = 0; s < nr[k].size(); s++) {
	int g = nr[k][r];
	int gg = nr[k][s];
	double value = OmegaInv[k][r][s];

	map<int,double>::iterator it;
	it = OmegaInvSparse[g].find(gg);
	if (it == OmegaInvSparse[g].end())
	  OmegaInvSparse[g].insert(pair<int,double>(gg,value));
	else {
	  OmegaInvSparse[g][gg] += value;
	}
      }
    }
  }

  for (k = 0; k < OmegaSepInv.size(); k++) {
    int r;
    for (r = 0; r < oldComponents[k].size(); r++) {
      int s;
      for (s = 0; s < oldComponents[k].size(); s++) {
	int g = nr[k][r];
	int gg = nr[k][s];
	double value = OmegaSepInv[k][r][s];

	OmegaInvSparse[g][gg] -= value;
      }
    }
  }

  // print OmegaInv to files
  /*
  {
    char filename[120];
    sprintf(filename,"OmegaTotalInv.txt");
    FILE *out = fopen(filename,"w");
    int i,j;
    for (i = 0; i < OmegaTotal.size(); i++) {
      for (j = 0; j < OmegaTotal[i].size(); j++) {
	double value = 0.0;
	if (OmegaInvSparse[i].find(j) != OmegaInvSparse[i].end())
	  value = OmegaInvSparse[i][j];
	fprintf(out,"%20.18e ",value);
      }
      fprintf(out,"\n");
    }
    fclose(out);
  }
  */
  // finished printing


  // establish covariance matrix R and its inverse

  vector<vector<double> > R;
  R.resize(Q);
  int p;
  for (p = 0; p < Q; p++) {
    R[p].resize(Q);
  }
  int q;
  for (p = 0; p < Q; p++) {
    R[p][p] = tau2R[p];
    for (q = p + 1; q < Q; q++) {
      R[p][q] = sqrt(tau2R[p] * tau2R[q]) * r[qq2index(p,q,Q)];
      R[q][p] = R[p][q];
    }
  }
  vector<vector<double> > RInverse;

  inverse(R,RInverse);
  
  // print RInverse to file
  /*
  {
    char filename[120];
    sprintf(filename,"RInverse.txt");
    FILE *out = fopen(filename,"w");
    int i,j;
    for (i = 0; i < RInverse.size(); i++) {
      for (j = 0; j < RInverse[i].size(); j++)
	fprintf(out,"%20.18e ",RInverse[i][j]);
      fprintf(out,"\n");
    }
    fclose(out);
  }
  */
  // finished printing RInverse


  // compute precision matrix of full conditional

  //  cout << "start computing precision matrix of full conditional" << endl;

  vector<map<int,double> > VinvSparse;
  VinvSparse.resize(G*Q);
  for (k = 0; k < G*Q; k++)
    VinvSparse[k].clear();

  for (g = 0; g < G; g++) {
    map<int,double>::iterator it;
    for (it = OmegaInvSparse[g].begin(); it != OmegaInvSparse[g].end(); it++) {
      int gg = it->first;
      double value = it->second;
      
      int qq;
      for (q = 0; q < Q; q++) {
	for (qq = 0; qq < Q; qq++) {
	  int index = g * Q + q;
      	  int indexindex = gg * Q + qq;
	  double cov = value * RInverse[q][qq];
	  
	  VinvSparse[index].insert(pair<int,double>(indexindex,cov));
	}
      }
    }
  }

  // print precision matrix of full conditional
  /*
  {
    char filename[120];
    sprintf(filename,"VinvReduced.txt");
    FILE *out = fopen(filename,"w");
    int i,j;
    for (i = 0; i < VinvSparse.size(); i++) {
      for (j = 0; j < VinvSparse.size(); j++) {
	double value = 0.0;
	if (VinvSparse[i].find(j) != VinvSparse[i].end())
	  value = VinvSparse[i][j];
	fprintf(out,"%20.18e ",value);
      }
      fprintf(out,"\n");
    }
    fclose(out);
  }
  */
  // finished printing

  // add effect of data

  vector<double> l(VinvSparse.size(),0.0);
  vector<double> L(VinvSparse.size(),0.0);
  for (g = 0; g < G; g++)
    for (q = 0; q < Q; q++) {
      if (delta[qg2index(q,g,Q,G)] == 1) {
	int index = g * Q + q;

	double v0 = sigma2[qg2index(q,g,Q,G)] * phi[qg2index(q,g,Q,G)];
	double v1 = sigma2[qg2index(q,g,Q,G)] / phi[qg2index(q,g,Q,G)];
	double diag = 0.0;
	double ll = 0.0;
	int s;
	for (s = 0; s < S[q]; s++) {
	  double variance = psi[sq2index(s,q,S,Q)] == 0 ? v0 : v1;
	  diag += exp(b[q] * log(sigma2[qg2index(q,g,Q,G)])) / variance;

	  ll += (2.0 * psi[sq2index(s,q,S,Q)] - 1.0) * (x[sqg2index(s,q,g,S,Q,G)] - nu[qg2index(q,g,Q,G)]) / variance;
	}
	ll *= exp(0.5 * b[q] * log(sigma2[qg2index(q,g,Q,G)]));
	L[index] = diag;
	l[index] = ll;
      }
    }


  int index;
  for (index = 0; index < L.size(); index++)
    VinvSparse[index][index] += L[index];

  // print L and l
  /*
  {
    char filename[120];
    sprintf(filename,"l.txt");
    FILE *out = fopen(filename,"w");
    int i;
    for (i = 0; i < l.size(); i++) {
      fprintf(out,"%20.18e\n",l[i]);
    }
    fclose(out);

    sprintf(filename,"L.txt");
    out = fopen(filename,"w");
    for (i = 0; i < L.size(); i++) {
      fprintf(out,"%20.18e\n",L[i]);
    }
    fclose(out);

  }
  */
  // print precision matrix of full conditional
  /*
  {
    char filename[120];
    sprintf(filename,"Vinv.txt");
    FILE *out = fopen(filename,"w");
    int i,j;
    for (i = 0; i < VinvSparse.size(); i++) {
      for (j = 0; j < VinvSparse.size(); j++) {
	double value = 0.0;
	if (VinvSparse[i].find(j) != VinvSparse[i].end())
	  value = VinvSparse[i][j];
	fprintf(out,"%20.18e ",value);
      }
      fprintf(out,"\n");
    }
    fclose(out);
  }
  */
  // finished printing

  // establish a version of VinvSparse with the indices reversed

  vector<map<int,double> > VinvSparseReversed;
  VinvSparseReversed.resize(VinvSparse.size());
  for (k = 0; k < VinvSparseReversed.size(); k++) 
    VinvSparseReversed[k].clear();
  for (k = 0; k < VinvSparseReversed.size(); k++) {
    map<int,double>::iterator it;
    for (it = VinvSparse[k].begin(); it != VinvSparse[k].end(); it++) {

      int r = it->first;
      double value = it->second;

      int kk = VinvSparseReversed.size() - k - 1;
      int rr = VinvSparseReversed.size() - r - 1;
      VinvSparseReversed[rr].insert(pair<int,double>(kk,value));
    }
  }

  // print VinvSparseReversed
  /*
  {
    char filename[120];
    sprintf(filename,"VinvReversed.txt");
    FILE *out = fopen(filename,"w");
    int i,j;
    for (i = 0; i < VinvSparseReversed.size(); i++) {
      for (j = 0; j < VinvSparseReversed.size(); j++) {
	double value = 0.0;
	if (VinvSparseReversed[i].find(j) != VinvSparseReversed[i].end())
	  value = VinvSparseReversed[i][j];
	fprintf(out,"%20.18e ",value);
      }
      fprintf(out,"\n");
    }
    fclose(out);
  }
  */
  // finished printing

  // perform Cholesky decomposition for (the sparse matrix) VinvSparseReversed

  //  cout << "start Cholesky factorization" << endl;

  vector<map<int,double> > cholReversed;
  cholReversed.resize(Q * G);
  for (index = 0; index < cholReversed.size(); index++)
    cholReversed[index].clear();

  int N = cholReversed.size();
  int i;
  for (i = 0; i < N; i++) {
    map<int,double>::iterator it;
    for (it = VinvSparseReversed[i].find(i); it != VinvSparseReversed[i].end(); it++) {
      int j = it->first;
      double value = it->second;

      double sum = value;
      map<int,double>::iterator it2;
      for (it2 = cholReversed[i].begin(); it2 != cholReversed[i].end(); it2++) {
	if (it2->first < i) {
	  int k = it2->first;
	  double valuei = it2->second;
	  map<int,double>::iterator it3 = cholReversed[j].find(k);
	  if (it3 != cholReversed[j].end()) {
	    double valuej = it3->second;
	    sum -= valuei * valuej;
	  }
	}
      }

      if (i == j && sum <= 0.0) {
	fprintf(stderr,"DeltaStarGibbs: Matrix is not positive definite!\n");
	exit(-1);
      }

      if (i == j) 
	cholReversed[j].insert(pair<int,double>(i,sqrt(sum)));
      else
	cholReversed[j].insert(pair<int,double>(i,sum / cholReversed[i][i]));
    }
  }
  /*
  // print (reversed) cholesky matrix

  {
    char filename[120];
    sprintf(filename,"cholReversed.txt");
    FILE *out = fopen(filename,"w");
    int i,j;
    for (i = 0; i < cholReversed.size(); i++) {
      for (j = 0; j < cholReversed.size(); j++) {
	double value = 0.0;
	if (cholReversed[i].find(j) != cholReversed[i].end())
	  value = cholReversed[i][j];
	fprintf(out,"%20.18e ",value);
      }
      fprintf(out,"\n");
    }
    fclose(out);
  }
  */
  // finished printing

  // establish a version of chol with the indices reversed back

  vector<map<int,double> > chol;
  chol.resize(cholReversed.size());
  for (k = 0; k < chol.size(); k++)
    chol[k].clear();
  for (k = 0; k < cholReversed.size(); k++) {
    map<int,double>::iterator it;
    for (it = cholReversed[k].begin(); it != cholReversed[k].end(); it++) {
      int r = it->first;
      double value = it->second;

      int kk = chol.size() - k - 1;
      int rr = chol.size() - r - 1;
      chol[rr].insert(pair<int,double>(kk,value));
    }
  }

  // print cholesky matrix
  /*
  {
    char filename[120];
    sprintf(filename,"chol.txt");
    FILE *out = fopen(filename,"w");
    int i,j;
    for (i = 0; i < chol.size(); i++) {
      for (j = 0; j < chol.size(); j++) {
	double value = 0.0;
	if (chol[i].find(j) != chol[i].end())
	  value = chol[i][j];
	fprintf(out,"%20.18e ",value);
      }
      fprintf(out,"\n");
    }
    fclose(out);
  }
  */
  // establish representation of the transpose of the cholesky matrix

  vector<map<int,double> > cholT;
  cholT.resize(chol.size());
  for (k = 0; k < cholT.size(); k++) 
    cholT[k].clear();
  for (i = 0; i < chol.size(); i++) {
    map<int,double>::iterator it;
    for (it = chol[i].begin(); it != chol[i].end(); it++) {
      int j = it->first;
      double value = it->second;

      cholT[j].insert(pair<int,double>(i,value));
    }
  }

  // print cholT
  /*
  {
    char filename[120];
    sprintf(filename,"cholT.txt");
    FILE *out = fopen(filename,"w");
    int i,j;
    for (i = 0; i < cholT.size(); i++) {
      for (j = 0; j < cholT.size(); j++) {
	double value = 0.0;
	if (cholT[i].find(j) != cholT[i].end())
	  value = cholT[i][j];
	fprintf(out,"%20.18e ",value);
      }
      fprintf(out,"\n");
    }
    fclose(out);
  }
  */
  // finished printing

  //  cout << "start computing mean value" << endl;

  vector<double> u(l.size(),0.0);
  for (i = u.size() - 1; i >= 0; i--) {
    double diag = 0.0;
    double sum = 0.0;
    map<int,double>::iterator it;
    for (it = cholT[i].begin(); it != cholT[i].end(); it++) {
      int j= it->first;
      double value = it->second;
      
      if (i == j)
	diag = value;
      else
	sum += value * u[j];
    }
    u[i] = (l[i] - sum) / diag;
  }

  vector<double> mean(u.size(),0.0);
  for (i = 0; i < mean.size(); i++) {
    double diag = 0.0;
    double sum = 0.0;
    map<int,double>::iterator it;
    for (it = chol[i].begin(); it != chol[i].end(); it++) {
      int j = it->first;
      double value = it->second;

      if (j == i)
	diag = value;
      else
	sum += value * mean[j];
    }
    mean[i] = (u[i] - sum) / diag;
  }
   
  // print mean value
  /*
  {
    char filename[120];
    sprintf(filename,"mean.txt");
    FILE *out = fopen(filename,"w");
    int i;
    for (i = 0; i < mean.size(); i++) {
      fprintf(out,"%20.18e\n",mean[i]);
    }
    fclose(out);
  }
  */
  // finished printing
  
 

  // generate a sample with zero mean, or compute the sample that should have been sampled

  //  cout << "start sampling" << endl;
  
  vector<double> sample(chol.size(),0.0);
  if (draw == 1) {
    vector<double> z(chol.size(),0.0);
    for (k = 0; k < z.size(); k++)
      z[k] = ran.Norm01();

    // print z
    /*
    {
      char filename[120];
      sprintf(filename,"z.txt");
      FILE *out = fopen(filename,"w");
      int i;
      for (i = 0; i < z.size(); i++) {
	fprintf(out,"%20.18e\n",z[i]);
      }
      fclose(out);
    }
    */
    // finished printing


    
    for (i = 0; i < sample.size(); i++) {
      double diag = 0.0;
      double sum = 0.0;
      map<int,double>::iterator it;
      for (it = chol[i].begin(); it != chol[i].end(); it++) {
	int j = it->first;
	double value = it->second;
	
	if (j == i)
	  diag = value;
	else
	  sum += value * sample[j];
      }
      sample[i] = (z[i] - sum) / diag;
    }

    // print sample
    /*
  {
    char filename[120];
    sprintf(filename,"sample.txt");
    FILE *out = fopen(filename,"w");
    int i;
    for (i = 0; i < sample.size(); i++) {
      fprintf(out,"%20.18e\n",sample[i]);
    }
    fclose(out);
  }
    */
  // finished printing

  }
  else {  // compute sample[i] necessary to generate the current values
    // compute DeltaStar
    
    vector<vector<double> > DeltaStar;
    DeltaStar.resize(G);
    for (g = 0; g < G; g++) {
      DeltaStar[g].resize(Q);
      for (q = 0; q < Q; q++) {
	DeltaStar[g][q] = Delta[qg2index(q,g,Q,G)] / exp(0.5 * b[q] * log(sigma2[qg2index(q,g,Q,G)]));
      }
    }

    // subtract mean value

    for (g = 0; g < G; g++)
      for (q = 0; q < Q; q++) {
	int index = g * Q + q;

	DeltaStar[g][q] -= mean[index];
      }

    // insert value in sample[]

    for (g = 0; g < G; g++)
      for (q = 0; q < Q; q++) {
	int index = g * Q + q;

	sample[index] = DeltaStar[g][q];
      }
  }

  // compute potential for sample

  double pot = 0.0;
  for (i = 0; i < sample.size(); i++) {
    double diag = 0.0;
    double sum = 0.0;
    map<int,double>::iterator it;
    for (it = chol[i].begin(); it != chol[i].end(); it++) {
      int j = it->first;
      double value = it->second;
      
      if (j == i)
	diag = value;
      else
	sum += value * sample[j];
    }
    double mean = - sum / diag;
    double variance = 1.0 / (diag * diag);
    pot += ran.PotentialGaussian(variance,mean,sample[i]);
  }

  if (draw == 1) { // add mean value and insert in data structure
    for (k = 0; k < Q * G; k++)
      sample[k] += mean[k];

    for (g = 0; g < G; g++)
      for (q = 0; q < Q; q++) {
	int index = g * Q + q;

	double DeltaStar = sample[index];
	
	Delta[qg2index(q,g,Q,G)] = DeltaStar * exp(0.5 * b[q] * log(sigma2[qg2index(q,g,Q,G)]));
      }
  }
  
  //  cout << "finished" << endl;

  return pot;
}
Example No. 12
void GaussianMean1DRegressionCompute(const QUESO::BaseEnvironment& env,
    double priorMean, double priorVar, const likelihoodData& dat)
{
  // parameter space: 1-D on (-infinity, infinity)
  QUESO::VectorSpace<P_V, P_M> paramSpace(
					 env,       // queso environment
					 "param_",  // name prefix
					 1,         // dimensions
					 NULL);     // names

  P_V paramMin(paramSpace.zeroVector());
  P_V paramMax(paramSpace.zeroVector());
  paramMin[0] = -INFINITY;
  paramMax[0] = INFINITY;
  QUESO::BoxSubset<P_V, P_M> paramDomain(
					"paramBox_",  // name prefix
					paramSpace,   // vector space
					paramMin,     // min values
					paramMax);    // max values

  // gaussian prior with user supplied mean and variance
  P_V priorMeanVec(paramSpace.zeroVector());
  P_V priorVarVec(paramSpace.zeroVector());
  priorMeanVec[0] = priorMean;
  priorVarVec[0] = priorVar;
  QUESO::GaussianVectorRV<P_V, P_M> priorRv("prior_", paramDomain, priorMeanVec,
      priorVarVec);

  // likelihood function object, evaluated through the user-supplied routine
  QUESO::GenericScalarFunction<P_V, P_M> likelihoodFunctionObj(
							      "like_",                   // name prefix
							      paramDomain,               // image set
							      LikelihoodFunc<P_V, P_M>,  // routine
							      (void *) &dat,             // routine data ptr
							      true);                     // routineIsForLn

  QUESO::GenericVectorRV<P_V, P_M> postRv(
      "post_",       // name prefix
       paramSpace);  // image set


  // Initialize and solve the Inverse Problem with Bayes multi-level sampling
  QUESO::StatisticalInverseProblem<P_V, P_M> invProb(
      "",                     // name prefix
      NULL,                   // alt options
      priorRv,                // prior RV
      likelihoodFunctionObj,  // likelihood fcn
      postRv);                // posterior RV

  invProb.solveWithBayesMLSampling();

  // compute mean and second moment of samples on each proc via Knuth online mean/variance algorithm
  int N = invProb.postRv().realizer().subPeriod();
  double subMean = 0.0;
  double subM2 = 0.0;
  double delta;
  P_V sample(paramSpace.zeroVector());
  for (int n = 1; n <= N; n++) {
    invProb.postRv().realizer().realization(sample);
    delta = sample[0] - subMean;
    subMean += delta / n;
    subM2 += delta * (sample[0] - subMean);
  }

  // gather all Ns, means, and M2s to proc 0
  std::vector<int> unifiedNs(env.inter0Comm().NumProc());
  std::vector<double> unifiedMeans(env.inter0Comm().NumProc());
  std::vector<double> unifiedM2s(env.inter0Comm().NumProc());
  MPI_Gather(&N, 1, MPI_INT, &(unifiedNs[0]), 1, MPI_INT, 0,
      env.inter0Comm().Comm());
  MPI_Gather(&subMean, 1, MPI_DOUBLE, &(unifiedMeans[0]), 1, MPI_DOUBLE, 0,
      env.inter0Comm().Comm());
  MPI_Gather(&subM2, 1, MPI_DOUBLE, &(unifiedM2s[0]), 1, MPI_DOUBLE, 0,
      env.inter0Comm().Comm());

  // get the total number of likelihood calls at proc 0
  unsigned long totalLikelihoodCalls = 0;
  MPI_Reduce(&likelihoodCalls, &totalLikelihoodCalls, 1, MPI_UNSIGNED_LONG,
      MPI_SUM, 0, env.inter0Comm().Comm());

  // compute global posterior mean and std via Chan algorithm, output results on proc 0
  if (env.inter0Rank() == 0) {
    int postN = unifiedNs[0];
    double postMean = unifiedMeans[0];
    double postVar = unifiedM2s[0];
    for (unsigned int i = 1; i < unifiedNs.size(); i++) {
      delta = unifiedMeans[i] - postMean;
      postMean = (postN * postMean + unifiedNs[i] * unifiedMeans[i]) /
        (postN + unifiedNs[i]);
      postVar += unifiedM2s[i] + delta * delta *
        (((double)postN * unifiedNs[i]) / (postN + unifiedNs[i]));
      postN += unifiedNs[i];
    }
    postVar /= postN;

    //compute exact answer - available in this case since the exact posterior is a gaussian
    N = dat.dataSet.size();
    double dataSum = 0.0;
    for (int i = 0; i < N; i++)
      dataSum += dat.dataSet[i];
    double datMean = dataSum / N;
    double postMeanExact = (N * priorVar / (N * priorVar + dat.samplingVar)) *
      datMean + (dat.samplingVar / (N * priorVar + dat.samplingVar)) * priorMean;
    double postVarExact = 1.0 / (N / dat.samplingVar + 1.0 / priorVar);

    std::cout << "Number of posterior samples: " << postN << std::endl;
    std::cout << "Estimated posterior mean: " << postMean << " +/- "
      << std::sqrt(postVar) << std::endl;
    std::cout << "Likelihood function calls: " << totalLikelihoodCalls
      << std::endl;
    std::cout << "\nExact posterior: Gaussian with mean " << postMeanExact
      << ", standard deviation " << std::sqrt(postVarExact) << std::endl;
  }
}
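The per-process loop above is Knuth's online mean/variance update, and the proc-0 loop merges the partial (N, mean, M2) summaries using Chan's pairwise formula. Isolated from the MPI plumbing, that merge step looks roughly like this (a sketch of the arithmetic only, not of any QUESO API):

#include <iostream>

struct Moments { long n; double mean; double m2; }; // m2 = sum of squared deviations

// Chan et al. combination of two running summaries into one.
Moments combine(const Moments &a, const Moments &b) {
    Moments out;
    double delta = b.mean - a.mean;
    out.n = a.n + b.n;
    out.mean = (a.n * a.mean + b.n * b.mean) / out.n;
    out.m2 = a.m2 + b.m2 + delta * delta * (static_cast<double>(a.n) * b.n / out.n);
    return out;
}

int main() {
    Moments p0{100, 1.0, 25.0}, p1{50, 1.2, 10.0}; // two per-process summaries
    Moments all = combine(p0, p1);
    std::cout << "mean=" << all.mean << " var=" << all.m2 / all.n << "\n";
}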
Example No. 13
Point Geometry::sample(const Point&, const GeomSample &gs, Normal &normal) const {
	return sample(gs, normal);
}
Example No. 14
int main (int argc, char **argv)
{
	int c;
	unsigned int flag = 0;
	int interval = 1, count = 0, max_count = 1;
	struct vg_data vg_now, vg_prev;
	VMGuestLibError ret;

	while ((c = getopt(argc, argv, "i:c:hvru")) != -1) {
		switch(c) {
		case 'i':
			interval = atoi(optarg);
			break;
			
		case 'c':
			max_count = atoi(optarg);
			break;
			
		case 'h':
			usage();
			return 0;
			break;
			
		case 'r': /* raw output */
			flag |= FLAG_RAWOUTPUT;
			break;
			
		case 'v': /* verbose mode */
			flag |= FLAG_VERBOSE;
			break;
			
		case 'u':
			flag |= FLAG_UNIXTIME;
			break;
			
		default:
			printf("Unkown option '%c'\n", c);
		}
	}

	memset(&vg_now, 0x0, sizeof(struct vg_data));
	
	ret = VMGuestLib_OpenHandle(&vg_now.handle);
	if (ret != VMGUESTLIB_ERROR_SUCCESS) {
		if (IS_VERBOSE(flag)) {
			printf("VMGuestLib_OpenHandle: %d (%s)\n",
			       ret, VMGuestLib_GetErrorText(ret));
		}
		return 1;
	}
	
	if (sample(&vg_now, flag) != 0) {
		goto bailout;
	}

	if (IS_RAWOUTPUT(flag)) {
		printf("Timestamp "
		       "SessionId "
		       "HostProcessorSpeed "
		       "CpuReservationMHz CpuLimitMHz CpuShares "
		       "ElapsedMs CpuUsedMs CpuStolenMs "
		       "MemReservationMB MemLimitMB MemShares MemMappedMB "
		       "MemActiveMB MemOverheadMB MemBalloonedMB MemSwappedMB "
		       "MemSharedMB MemSharedSavedMB MemUsedMB\n"
		);
	} else {
		printf("%-24s %-8s %-8s %8s %8s %8s %8s\n",
		       "Timestamp", "intvl(g)", "intvl(h)",
		       "used", "stolen", "%used", "%ready");
	}
	for (count = 0; count < max_count; count++) {
		vg_prev = vg_now;
		sleep(interval);
		if (sample(&vg_now, flag) != 0) {
			goto bailout;
		}
		output(&vg_now, &vg_prev, flag);
	}
	
bailout:	
	ret = VMGuestLib_CloseHandle(vg_now.handle);
	if (ret != VMGUESTLIB_ERROR_SUCCESS) {
		if (IS_VERBOSE(flag)) {
			printf("VMGuestLib_CloseHandle: %d (%s)\n",
			       ret, VMGuestLib_GetErrorText(ret));
		}
		return 1;
	}
	return 0;
}
Example No. 15
int main ( int argc, char *argv[] )
{
//Variables for parsing the data file
	std::string filename = "SPECT.train";
	std::string line;
	std::stringstream parse;
	const int ssize = 100; //establish a buffer size to store attribute values,
			 //which for this binary classification data are single characters
	char c[ssize];
	char delimiter = ',';

	//Variables to store the values in the data file
	std::vector<int> tmpcase;
	std::vector< std::vector<int> > training_set;

	cv::Mat sample(0, 1, CV_32FC1);
	cv::Mat labels(0, 1, CV_32SC1);
	cv::Mat train_set;

	std::ifstream dataset_file(filename.c_str(), std::ios::in);

	if(!dataset_file)
	{
		std::cerr << "Cannot load training set file" << std::endl;
	}
	else
	{
		while( getline(dataset_file, line) )
		{
			parse << line;

			while( parse.getline(c,ssize,delimiter) )
			{
				tmpcase.push_back( (*c-'0') );
				sample.push_back( (float)(*c-'0') );
			}

			parse.str(""); //safety measure to erase previous contents
			parse.clear(); //clear flags to be able to read from it again

			training_set.push_back(tmpcase);
			tmpcase.clear(); 

			train_set.push_back(sample.reshape(0,1));
			labels.push_back((int)(sample.at<float>(0)));
			sample = cv::Mat();
			
		}
	}

	std::cout << train_set << std::endl;
	cv::FileStorage fstore_traindata("spect_train.yml",cv::FileStorage::WRITE);
	cv::Mat train_samples(train_set.colRange(1,train_set.cols));
	fstore_traindata << "train_samples" << train_samples;
	fstore_traindata << "train_labels" << labels;
	fstore_traindata.release();
	std::cout << train_samples << std::endl;
	std::cout << labels << std::endl;

	std::vector<int> tmp;
	for(std::vector< std::vector<int> >::iterator it = training_set.begin(); it != training_set.end(); ++it)
	{
		tmp = *it;
		for(std::vector<int>::iterator it2 = tmp.begin(); it2 != tmp.end(); ++it2)
		{
			std::cout << *it2 << " ";
		}
		std::cout << std::endl;
		tmp.clear();
	}

}
Example No. 16
File: tree.c Project: Arafatk/mlpy
static int compute_tree_bagging(ETree *etree,int n,int d,double *x[],
				int y[], int nmodels,int stumps, int minsize)
{
  int i,b;
  int *samples;
  double **trx;
  int *try;

  if(nmodels<1){
    fprintf(stderr,"compute_tree_bagging: nmodels must be greater than 0\n");
    return 1;
  }

 if(stumps != 0 && stumps != 1){
    fprintf(stderr,"compute_tree_bagging: parameter stumps must be 0 or 1\n");
    return 1;
  }

  if(minsize < 0){
    fprintf(stderr,"compute_tree_bagging: parameter minsize must be >= 0\n");
    return 1;
  }

  etree->nclasses=iunique(y,n, &(etree->classes));


  if(etree->nclasses<=0){
    fprintf(stderr,"compute_tree_bagging: iunique error\n");
    return 1;
  }
  if(etree->nclasses==1){
    fprintf(stderr,"compute_tree_bagging: only 1 class recognized\n");
    return 1;
  }

  if(etree->nclasses==2)
    if(etree->classes[0] != -1 || etree->classes[1] != 1){
      fprintf(stderr,"compute_tree_bagging: for binary classification classes must be -1,1\n");
      return 1;
    }
  
  if(etree->nclasses>2)
    for(i=0;i<etree->nclasses;i++)
      if(etree->classes[i] != i+1){
	fprintf(stderr,"compute_tree_bagging: for %d-class classification classes must be 1,...,%d\n",etree->nclasses,etree->nclasses);
	return 1;
      }

  if(!(etree->tree=(Tree *)calloc(nmodels,sizeof(Tree)))){
    fprintf(stderr,"compute_tree_bagging: out of memory\n");
    return 1;
  }
  etree->nmodels=nmodels;
  if(!(etree->weights=dvector(nmodels))){
    fprintf(stderr,"compute_tree_bagging: out of memory\n");
    return 1;
  }

  for(b=0;b<nmodels;b++)
    etree->weights[b]=1.0 / (double) nmodels;
  
  if(!(trx=(double **)calloc(n,sizeof(double*)))){
    fprintf(stderr,"compute_tree_bagging: out of memory\n");
    return 1;
  }
  if(!(try=ivector(n))){
    fprintf(stderr,"compute_tree_bagging: out of memory\n");
    return 1;
  }
  
  for(b=0;b<nmodels;b++){
    if(sample(n, NULL, n, &samples, TRUE,b)!=0){
       fprintf(stderr,"compute_tree_bagging: sample error\n");
       return 1;
    }

    for(i =0;i<n;i++){
      trx[i] = x[samples[i]];
      try[i] = y[samples[i]];
    }

    if(compute_tree(&(etree->tree[b]),n,d,trx,try,stumps,minsize)!=0){
      fprintf(stderr,"compute_tree_bagging: compute_tree error\n");
      return 1;
    }
    free_ivector(samples);

  }

  free(trx);
  free_ivector(try);
    
  return 0;

}



static int compute_tree_aggregate(ETree *etree,int n,int d,double *x[],int y[],
				  int nmodels,int stumps, int minsize)
{
  int i,b;
  int *samples;
  double **trx;
  int *try;
  int indx;

  if(nmodels<1){
    fprintf(stderr,"compute_tree_aggregate: nmodels must be greater than 0\n");
    return 1;
  }

  if(nmodels > n){
    fprintf(stderr,"compute_tree_aggregate: nmodels must be less than n\n");
    return 1;
  }

 if(stumps != 0 && stumps != 1){
    fprintf(stderr,"compute_tree_bagging: parameter stumps must be 0 or 1\n");
    return 1;
  }

  if(minsize < 0){
    fprintf(stderr,"compute_tree_bagging: parameter minsize must be >= 0\n");
    return 1;
  }

  etree->nclasses=iunique(y,n, &(etree->classes));

  if(etree->nclasses<=0){
    fprintf(stderr,"compute_tree_aggregate: iunique error\n");
    return 1;
  }
  if(etree->nclasses==1){
    fprintf(stderr,"compute_tree_aggregate: only 1 class recognized\n");
    return 1;
  }

  if(etree->nclasses==2)
    if(etree->classes[0] != -1 || etree->classes[1] != 1){
      fprintf(stderr,"compute_tree_aggregate: for binary classification classes must be -1,1\n");
      return 1;
    }
  
  if(etree->nclasses>2)
    for(i=0;i<etree->nclasses;i++)
      if(etree->classes[i] != i+1){
	fprintf(stderr,"compute_tree_aggregate: for %d-class classification classes must be 1,...,%d\n",etree->nclasses,etree->nclasses);
	return 1;
      }

  if(!(etree->tree=(Tree *)calloc(nmodels,sizeof(Tree)))){
    fprintf(stderr,"compute_tree_aggregate: out of memory\n");
    return 1;
  }
  etree->nmodels=nmodels;
  if(!(etree->weights=dvector(nmodels))){
    fprintf(stderr,"compute_tree_aggregate: out of memory\n");
    return 1;
  }

  for(b=0;b<nmodels;b++)
    etree->weights[b]=1.0 / (double) nmodels;
  
  if(!(trx=(double **)calloc(n,sizeof(double*)))){
    fprintf(stderr,"compute_tree_aggregate: out of memory\n");
    return 1;
  }
  if(!(try=ivector(n))){
    fprintf(stderr,"compute_tree_aggregate: out of memory\n");
    return 1;
  }
  
  if(sample(nmodels, NULL, n, &samples, TRUE,0)!=0){
    fprintf(stderr,"compute_tree_aggregate: sample error\n");
    return 1;
  }

  for(b=0;b<nmodels;b++){
  
    indx=0;
    for(i=0;i<n;i++)
      if(samples[i] == b){
	trx[indx] = x[i];
	try[indx++] = y[i];
      }

    if(compute_tree(&(etree->tree[b]),indx,d,trx,try,stumps,minsize)!=0){
      fprintf(stderr,"compute_tree_aggregate: compute_tree error\n");
      return 1;
    }

  }

  free_ivector(samples);
  free(trx);
  free_ivector(try);
    
  return 0;

}
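Both routines above delegate the bootstrap draw to a sample() helper that is not part of this example. Judging from the call sites, sample(n, prob, nsamples, &samples, replacement, seed) fills an index array of length nsamples with values drawn from {0, ..., n-1}, uniformly when prob is NULL and in proportion to prob otherwise; the calls here always pass TRUE for the replacement flag. A rough C++ sketch of that behaviour, covering only the with-replacement case (the real mlpy helper's signature and seeding may differ):

#include <cstdlib>
#include <random>

// Inferred behaviour of the bagging helper: draw nsamples indices from
// {0, ..., n-1} with replacement, uniformly if prob == NULL and with
// weights prob[0..n-1] otherwise.  Signature and seeding are assumptions.
int sample_sketch(int n, const double *prob, int nsamples,
                  int **samples, unsigned long seed) {
  *samples = (int *)std::malloc(nsamples * sizeof(int));
  if (!*samples) return 1;

  std::mt19937 rng(seed);
  if (prob == NULL) {
    std::uniform_int_distribution<int> uni(0, n - 1);
    for (int i = 0; i < nsamples; ++i) (*samples)[i] = uni(rng);
  } else {
    std::discrete_distribution<int> weighted(prob, prob + n);
    for (int i = 0; i < nsamples; ++i) (*samples)[i] = weighted(rng);
  }
  return 0; // caller releases the array, as the callers above do with free_ivector
}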
Example No. 17
void MetropolisRenderer::Render(const Scene *scene) {
    PBRT_MLT_STARTED_RENDERING();
    if (scene->lights.size() > 0) {
        int x0, x1, y0, y1;
        camera->film->GetPixelExtent(&x0, &x1, &y0, &y1);
        float t0 = camera->shutterOpen, t1 = camera->shutterClose;
        Distribution1D *lightDistribution = ComputeLightSamplingCDF(scene);

        if (directLighting != NULL) {
            PBRT_MLT_STARTED_DIRECTLIGHTING();
            // Compute direct lighting before Metropolis light transport
            if (nDirectPixelSamples > 0) {
                LDSampler sampler(x0, x1, y0, y1, nDirectPixelSamples, t0, t1);
                Sample *sample = new Sample(&sampler, directLighting, NULL, scene);
                vector<Task *> directTasks;
                int nDirectTasks = max(32 * NumSystemCores(),
                                 (camera->film->xResolution * camera->film->yResolution) / (16*16));
                nDirectTasks = RoundUpPow2(nDirectTasks);
                ProgressReporter directProgress(nDirectTasks, "Direct Lighting");
                for (int i = 0; i < nDirectTasks; ++i)
                    directTasks.push_back(new SamplerRendererTask(scene, this, camera, directProgress,
                                                                  &sampler, sample, false, i, nDirectTasks));
                std::reverse(directTasks.begin(), directTasks.end());
                EnqueueTasks(directTasks);
                WaitForAllTasks();
                for (uint32_t i = 0; i < directTasks.size(); ++i)
                    delete directTasks[i];
                delete sample;
                directProgress.Done();
            }
            camera->film->WriteImage();
            PBRT_MLT_FINISHED_DIRECTLIGHTING();
        }
        // Take initial set of samples to compute $b$
        PBRT_MLT_STARTED_BOOTSTRAPPING(nBootstrap);
        RNG rng(0);
        MemoryArena arena;
        vector<float> bootstrapI;
        vector<PathVertex> cameraPath(maxDepth, PathVertex());
        vector<PathVertex> lightPath(maxDepth, PathVertex());
        float sumI = 0.f;
        bootstrapI.reserve(nBootstrap);
        MLTSample sample(maxDepth);
        for (uint32_t i = 0; i < nBootstrap; ++i) {
            // Generate random sample and path radiance for MLT bootstrapping
            float x = Lerp(rng.RandomFloat(), x0, x1);
            float y = Lerp(rng.RandomFloat(), y0, y1);
            LargeStep(rng, &sample, maxDepth, x, y, t0, t1, bidirectional);
            Spectrum L = PathL(sample, scene, arena, camera, lightDistribution,
                               &cameraPath[0], &lightPath[0], rng);

            // Compute contribution for random sample for MLT bootstrapping
            float I = ::I(L);
            sumI += I;
            bootstrapI.push_back(I);
            arena.FreeAll();
        }
        float b = sumI / nBootstrap;
        PBRT_MLT_FINISHED_BOOTSTRAPPING(b);
        Info("MLT computed b = %f", b);

        // Select initial sample from bootstrap samples
        float contribOffset = rng.RandomFloat() * sumI;
        rng.Seed(0);
        sumI = 0.f;
        MLTSample initialSample(maxDepth);
        for (uint32_t i = 0; i < nBootstrap; ++i) {
            float x = Lerp(rng.RandomFloat(), x0, x1);
            float y = Lerp(rng.RandomFloat(), y0, y1);
            LargeStep(rng, &initialSample, maxDepth, x, y, t0, t1,
                      bidirectional);
            sumI += bootstrapI[i];
            if (sumI > contribOffset)
                break;
        }

        // Launch tasks to generate Metropolis samples
        uint32_t nTasks = largeStepsPerPixel;
        uint32_t largeStepRate = nPixelSamples / largeStepsPerPixel;
        Info("MLT running %d tasks, large step rate %d", nTasks, largeStepRate);
        ProgressReporter progress(nTasks * largeStepRate, "Metropolis");
        vector<Task *> tasks;
        Mutex *filmMutex = Mutex::Create();
        Assert(IsPowerOf2(nTasks));
        uint32_t scramble[2] = { rng.RandomUInt(), rng.RandomUInt() };
        uint32_t pfreq = (x1-x0) * (y1-y0);
        for (uint32_t i = 0; i < nTasks; ++i) {
            float d[2];
            Sample02(i, scramble, d);
            tasks.push_back(new MLTTask(progress, pfreq, i,
                d[0], d[1], x0, x1, y0, y1, t0, t1, b, initialSample,
                scene, camera, this, filmMutex, lightDistribution));
        }
        EnqueueTasks(tasks);
        WaitForAllTasks();
        for (uint32_t i = 0; i < tasks.size(); ++i)
            delete tasks[i];
        progress.Done();
        Mutex::Destroy(filmMutex);
        delete lightDistribution;
    }
    camera->film->WriteImage();
    PBRT_MLT_FINISHED_RENDERING();
}
Example No. 18
File: tree.c Project: Arafatk/mlpy
static int compute_tree_adaboost(ETree *etree,int n,int d,double *x[],int y[],
				 int nmodels,int stumps, int minsize)
{
  int i,b;
  int *samples;
  double **trx;
  int *try;
  double *prob;
  double *prob_copy;
  double sumalpha;
  double eps;
  int *pred;
  double *margin;
  double sumprob;
  

  if(nmodels<1){
    fprintf(stderr,"compute_tree_adaboost: nmodels must be greater than 0\n");
    return 1;
  }

 if(stumps != 0 && stumps != 1){
    fprintf(stderr,"compute_tree_bagging: parameter stumps must be 0 or 1\n");
    return 1;
  }

  if(minsize < 0){
    fprintf(stderr,"compute_tree_bagging: parameter minsize must be >= 0\n");
    return 1;
  }

  etree->nclasses=iunique(y,n, &(etree->classes));

  if(etree->nclasses<=0){
    fprintf(stderr,"compute_tree_adaboost: iunique error\n");
    return 1;
  }
  if(etree->nclasses==1){
    fprintf(stderr,"compute_tree_adaboost: only 1 class recognized\n");
    return 1;
  }

  if(etree->nclasses==2)
    if(etree->classes[0] != -1 || etree->classes[1] != 1){
      fprintf(stderr,"compute_tree_adaboost: for binary classification classes must be -1,1\n");
      return 1;
    }
  
  if(etree->nclasses>2){
    fprintf(stderr,"compute_tree_adaboost: multiclass classification not allowed\n");
    return 1;
  }

  if(!(etree->tree=(Tree *)calloc(nmodels,sizeof(Tree)))){
    fprintf(stderr,"compute_tree_adaboost: out of memory\n");
    return 1;
  }

  if(!(etree->weights=dvector(nmodels))){
    fprintf(stderr,"compute_tree_adaboost: out of memory\n");
    return 1;
  }

  if(!(trx=(double **)calloc(n,sizeof(double*)))){
    fprintf(stderr,"compute_tree_adaboost: out of memory\n");
    return 1;
  }
  if(!(try=ivector(n))){
    fprintf(stderr,"compute_tree_adaboost: out of memory\n");
    return 1;
  }
  
  if(!(prob_copy=dvector(n))){
    fprintf(stderr,"compute_tree_adaboost: out of memory\n");
    return 1;
  }
  if(!(prob=dvector(n))){
    fprintf(stderr,"compute_tree_adaboost: out of memory\n");
    return 1;
  }

  if(!(pred=ivector(n))){
    fprintf(stderr,"compute_tree_adaboost: out of memory\n");
    return 1;
  }

  for(i =0;i<n;i++)
    prob[i]=1.0/(double)n;

  etree->nmodels=nmodels;
  sumalpha=0.0;
  for(b=0;b<nmodels;b++){

    for(i =0;i<n;i++)
      prob_copy[i]=prob[i];
    if(sample(n, prob_copy, n, &samples, TRUE,b)!=0){
      fprintf(stderr,"compute_tree_adaboost: sample error\n");
      return 1;
    }

    for(i=0;i<n;i++){
      trx[i] = x[samples[i]];
      try[i] = y[samples[i]];
    }
    
    if(compute_tree(&(etree->tree[b]),n,d,trx,try,stumps,minsize)!=0){
      fprintf(stderr,"compute_tree_adaboost: compute_tree error\n");
      return 1;
    }
    free_ivector(samples);

    eps=0.0;
    for(i=0;i<n;i++){
      pred[i]=predict_tree(&(etree->tree[b]),x[i],&margin);
      if(pred[i] < -1 ){
	fprintf(stderr,"compute_tree_adaboost: predict_tree error\n");
	return 1;
      }
      if(pred[i]==0 || pred[i] != y[i])
	eps += prob[i];
      free_dvector(margin);
    }
    
    if(eps > 0.0 && eps < 0.5){
      etree->weights[b]=0.5 *log((1.0-eps)/eps);
      sumalpha+=etree->weights[b];
    }else{
      etree->nmodels=b;
      break;
    }
      
    sumprob=0.0;
    for(i=0;i<n;i++){
      prob[i]=prob[i]*exp(-etree->weights[b]*y[i]*pred[i]);
      sumprob+=prob[i];
    }

    if(sumprob <=0.0){
      fprintf(stderr,"compute_tree_adaboost: sumprob = 0\n");
      return 1;
    }
    for(i=0;i<n;i++)
      prob[i] /= sumprob;
    
  }
  
  if(etree->nmodels<=0){
    fprintf(stderr,"compute_tree_adaboost: no models produced\n");
    return 1;
  }

  if(sumalpha <=0){
      fprintf(stderr,"compute_tree_adaboost: sumalpha = 0\n");
      return 1;
  }
  for(b=0;b<etree->nmodels;b++)
    etree->weights[b] /= sumalpha;
  
  free(trx);
  free_ivector(try);
  free_ivector(pred);
  free_dvector(prob);
  free_dvector(prob_copy);
  return 0;

}



static void split_node(Node *node,Node *nodeL,Node *nodeR,int classes[],
		       int nclasses)
{
  int **indx;
  double *tmpvar;
  int i,j,k;
  int **npL , **npR;
  double **prL , **prR;
  int totL,totR;
  double a,b;
  double *decrease_in_inpurity;
  double max_decrease=0;
  int splitvar;
  int splitvalue;
  int morenumerous;

  nodeL->priors=dvector(nclasses);
  nodeR->priors=dvector(nclasses);
  nodeL->npoints_for_class=ivector(nclasses);
  nodeR->npoints_for_class=ivector(nclasses);
  indx=imatrix(node->nvar,node->npoints);
  tmpvar=dvector(node->npoints);
  decrease_in_inpurity=dvector(node->npoints-1);
  npL=imatrix(node->npoints,nclasses);
  npR=imatrix(node->npoints,nclasses);
  prL=dmatrix(node->npoints,nclasses);
  prR=dmatrix(node->npoints,nclasses);

  splitvar=0;
  splitvalue=0;
  max_decrease=0;

  for(i=0;i<node->nvar;i++){
    for(j=0;j<node->npoints;j++)
      tmpvar[j]=node->data[j][i];
    
    for(j=0;j<node->npoints;j++)
      indx[i][j]=j;
    dsort(tmpvar,indx[i],node->npoints,SORT_ASCENDING);

    for(k=0;k<nclasses;k++)
      if(node->classes[indx[i][0]]==classes[k]){
	npL[0][k] = 1;
	npR[0][k] = node->npoints_for_class[k]-npL[0][k];
      } else{
	npL[0][k] = 0;
	npR[0][k] = node->npoints_for_class[k];
      }
    
    for(j=1;j<node->npoints-1;j++)
      for(k=0;k<nclasses;k++)
	if(node->classes[indx[i][j]]==classes[k]){
	  npL[j][k] = npL[j-1][k] +1;
	  npR[j][k] = node->npoints_for_class[k] - npL[j][k];
	}
	else {
	  npL[j][k] = npL[j-1][k];
	  npR[j][k] = node->npoints_for_class[k] - npL[j][k];
	}


    for(j=0;j<node->npoints-1;j++){
      if(node->data[indx[i][j]][i] != node->data[indx[i][j+1]][i]){
	totL = totR = 0;
	
	for(k=0;k<nclasses;k++)
	  totL += npL[j][k];
	for(k=0;k<nclasses;k++)
	  prL[j][k] =  (double) npL[j][k] / (double) totL;
	
	for(k=0;k<nclasses;k++)
	  totR += npR[j][k];
	for(k=0;k<nclasses;k++)
	  prR[j][k] =  (double) npR[j][k] /(double)  totR;
	
	a = (double) totL / (double) node->npoints;
	b = (double) totR / (double) node->npoints ;
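	/* decrease in impurity: Gini(parent) - pL * Gini(left) - pR * Gini(right) */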
	
	decrease_in_inpurity[j] = gini_index(node->priors,nclasses) - 
	  a * gini_index(prL[j],nclasses) - b * gini_index(prR[j],nclasses);
      }
    }

    for(j=0;j<node->npoints-1;j++)
      if(decrease_in_inpurity[j] > max_decrease){
	max_decrease = decrease_in_inpurity[j];
	
	splitvar=i;
	splitvalue=j;

	for(k=0;k<nclasses;k++){
	  nodeL->priors[k]=prL[splitvalue][k];
	  nodeR->priors[k]=prR[splitvalue][k];
	  nodeL->npoints_for_class[k]=npL[splitvalue][k];
	  nodeR->npoints_for_class[k]=npR[splitvalue][k];
	}
      }
  }
  
  
  node->var=splitvar;
  node->value=(node->data[indx[splitvar][splitvalue]][node->var]+      
	       node->data[indx[splitvar][splitvalue+1]][node->var])/2.;

  nodeL->nvar=node->nvar;
  nodeL->nclasses=node->nclasses;
  nodeL->npoints=splitvalue+1;

  nodeL->terminal=TRUE;
  if(gini_index(nodeL->priors,nclasses) >0)
    nodeL->terminal=FALSE;

  nodeL->data=(double **) calloc(nodeL->npoints,sizeof(double *));
  nodeL->classes=ivector(nodeL->npoints);

  for(i=0;i<nodeL->npoints;i++){
    nodeL->data[i] = node->data[indx[splitvar][i]];
    nodeL->classes[i] = node->classes[indx[splitvar][i]];
  }
  
  
  morenumerous=0;
  for(k=0;k<nclasses;k++)
    if(nodeL->npoints_for_class[k] > morenumerous){
      morenumerous = nodeL->npoints_for_class[k];
      nodeL->node_class=classes[k];
    }
  


  nodeR->nvar=node->nvar;
  nodeR->nclasses=node->nclasses;
  nodeR->npoints=node->npoints-nodeL->npoints;

  nodeR->terminal=TRUE;
  if(gini_index(nodeR->priors,nclasses) >0)
    nodeR->terminal=FALSE;

  nodeR->data=(double **) calloc(nodeR->npoints,sizeof(double *));
  nodeR->classes=ivector(nodeR->npoints);

  for(i=0;i<nodeR->npoints;i++){
    nodeR->data[i] = node->data[indx[splitvar][nodeL->npoints+i]];
    nodeR->classes[i] = node->classes[indx[splitvar][nodeL->npoints+i]];
  }
  
  morenumerous=0;
  for(k=0;k<nclasses;k++)
    if(nodeR->npoints_for_class[k] > morenumerous){
      morenumerous = nodeR->npoints_for_class[k];
      nodeR->node_class=classes[k];
    }

  free_imatrix(indx,  node->nvar,node->npoints);
  free_imatrix(npL, node->npoints,nclasses);
  free_imatrix(npR, node->npoints,nclasses);
  free_dmatrix(prL, node->npoints,nclasses);
  free_dmatrix(prR, node->npoints,nclasses);
  free_dvector(tmpvar);
  free_dvector(decrease_in_inpurity);

}
Example No. 19
0
_Use_decl_annotations_
int WINAPI WinMain(HINSTANCE hInstance, HINSTANCE, LPSTR, int nCmdShow)
{
	D3D12HelloConstBuffers sample(1280, 720, L"D3D12 Raymarcher");
	return sample.Run(hInstance, nCmdShow);
}
Example No. 20
0
void compute_sift_keypoints(float *input, keypointslist& keypoints, int width, int height, siftPar &par)
{

	flimage image;

	/// Zoom the image if requested
	float octSize = 1.0;
	if (par.DoubleImSize){

		//printf("... compute_sift_keypoints :: applying zoom\n");
//		image.create(2*width, 2*height);
//		apply_zoom(input,image.getPlane(),2.0,par.order,width,height);
//		octSize *= 0.5;
		
		printf("Doulbe image size not allowed. Guoshen Yu\n");
		exit(-1);
		

		
	} else
	{

		image.create(width,height,input);
	}

// 	 printf("Using initial Dog value: %f\n", par.PeakThresh);
//    	 printf("Double image size: %d\n", par.DoubleImSize);
//    	 printf("Interpolation order: %d\n", par.order);


	/// Apply initial smoothing to input image to raise its smoothing to par.InitSigma.  
	/// We assume image from camera has smoothing of sigma = 0.5, which becomes sigma = 1.0 if image has been doubled. 
	/// increase = sqrt(Init^2 - Current^2)
	float	curSigma;
	if (par.DoubleImSize) curSigma = 1.0; else curSigma = 0.5;


	if (par.InitSigma > curSigma ) {

		if (DEBUG) printf("Convolving initial image to achieve std: %f \n", par.InitSigma);

		float sigma = (float) sqrt((double)(par.InitSigma * par.InitSigma - curSigma * curSigma));

		gaussian_convolution( image.getPlane(), image.getPlane(), image.nwidth(), image.nheight(), sigma);

	}



	/// Inside OctaveKeypoints the image is repeatedly convolved up to par.InitSigma,
	/// then subsampled by a factor of 2 while the image size remains reasonable.

	/// Keep reducing image by factors of 2 until one dimension is
	/// smaller than minimum size at which a feature could be detected.
	int 	minsize = 2 * par.BorderDist + 2;
	int     OctaveCounter = 0;
	//printf("... compute_sift_keypoints :: maximum number of scales : %d\n", par.OctaveMax);

	while (image.nwidth() > minsize &&  image.nheight() > minsize && OctaveCounter < par.OctaveMax) {

		if (DEBUG) printf("Calling OctaveKeypoints \n");

		OctaveKeypoints(image, octSize, keypoints,par); 

		// image is blurred inside OctaveKeypoints and therefore can be sampled
		flimage aux( (int)((float) image.nwidth() / 2.0f) , (int)((float) image.nheight() / 2.0f)); 

		if (DEBUG) printf("Sampling initial image \n");

		sample(image.getPlane(), aux.getPlane(), 2.0f, image.nwidth(), image.nheight());

		image = aux;

		octSize *= 2.0;

		OctaveCounter++;

	}


/*	printf("sift::  %d keypoints\n", keypoints.size());
	printf("sift::  plus non correctly localized: %d \n", 	par.noncorrectlylocalized);*/
	
}
Example No. 21
0
void Sighter::procSight(cv::Mat &frame, cv::Mat &lfeat, cv::Mat &rfeat) {


    cv::cvtColor(frame, drawMat, CV_BGRA2BGR);
    cv::flip(drawMat, drawMat, 1);

    cv::cvtColor(drawMat, grayMat, CV_BGR2GRAY);

    cv::Rect frect;

    cv::Mat& draw = drawMat;
    cv::Mat& gray = grayMat;

    //Detect face
    cv::vector<cv::Rect> faces;
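    // keep only the biggest detected face; the minimum face size is a quarter of
    // the frame in each dimension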
    faceDetector.detectMultiScale(gray, faces, 1.1, 20, CV_HAAR_DO_CANNY_PRUNING|CV_HAAR_FIND_BIGGEST_OBJECT, cv::Size(gray.cols/4,gray.rows/4));

    if(faces.empty()) {
        return;
    }

    frect = faces[0];

    cv::Mat face = gray(frect);

    //Detect eye
    cv::vector<cv::Rect> leyes,reyes;

    cv::Size max_size = cv::Size(face.cols/2,face.rows/4);
    cv::Size min_size = cv::Size(face.cols/10, 10);

    cv::Mat lface = face(cv::Rect(0,face.rows/4,face.cols/2,face.rows/3));
    eyeDetector.detectMultiScale(lface, leyes, 1.1, 20, CV_HAAR_DO_CANNY_PRUNING, min_size, max_size);

    cv::Mat rface = face(cv::Rect(face.cols/2,face.rows/4,face.cols/2,face.rows/3));
    eyeDetector.detectMultiScale(rface, reyes, 1.1, 20, CV_HAAR_DO_CANNY_PRUNING, min_size, max_size);

    int szl = (int)leyes.size();
    int szr = (int)reyes.size();

    if(szl < 1 || szr < 1) {
        return;
    }


    //pick the left/right eye pair whose centers have the smallest vertical offset
    int min_dy = INT_MAX;
    cv::Rect lerect, rerect;
    for(int i=0;i<szl;i++) {

        int cy = leyes[i].y + leyes[i].height/2;

        for(int j=0;j<szr;j++) {

            int cyr = reyes[j].y + reyes[j].height/2;

            int d = abs(cy-cyr);

            if( d < min_dy) {
                min_dy = d;
                lerect = leyes[i];
                rerect = reyes[j];
            }
        }
    }

    //transform the eye rectangles back to whole-frame coordinates
    lerect.x += frect.x;
    lerect.y += frect.y + frect.height/4;

    rerect.x += frect.x + frect.width/2;
    rerect.y += frect.y + frect.height/4;


    //classify each eye's gaze direction
    cv::Mat legray = gray(lerect);
    cv::Mat regray = gray(rerect);

    cv::flip(regray, regray, 1);

    lfeat = legray;
    rfeat = regray;

    cv::Mat sample(50,50,CV_8UC1);

    cv::resize(legray, sample, sample.size());


    int ldir = -1;
    if(eyeActM)
        ldir = eyeActM->predict(sample);

    cv::resize(regray, sample, sample.size());

    int rdir = -1;
    if(eyeActM)
        rdir = eyeActM->predict(sample);


    SightState sstate = STARE_NONE;
    if( ldir == 0 && rdir == 0 ) {
        sstate = STARE_CENTER;
    }
    if( ldir == 2 && rdir == 1) {
        sstate = STARE_LEFT;

    }
    if( ldir == 1 && rdir == 2) {
        sstate = STARE_RIGHT;
    }

    {
#define ACT_STAT_TOTAL 10
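        // temporal smoothing: a ring buffer of the last ACT_STAT_TOTAL raw states;
        // a sight state is committed only once it fills at least 60% of the window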

        static SightState sstates[ACT_STAT_TOTAL] = {STARE_NONE};
        static int ssi = 0;
        static int ssleft = 0;
        static int ssright = 0;
        static int sscenter = 0;
        static int ssnone = ACT_STAT_TOTAL;

        switch(sstates[ssi]) {
            case STARE_NONE:
                ssnone--;
                break;
            case STARE_LEFT:
                ssleft--;
                break;
            case STARE_RIGHT:
                ssright--;
                break;
            case STARE_CENTER:
                sscenter--;
                break;
            default:
                break;
        }
        switch(sstate) {
            case STARE_NONE:
                ssnone++;
                if(ssnone >= ACT_STAT_TOTAL*0.6) {
                    sightState = STARE_NONE;
                }
                break;
            case STARE_LEFT:
                ssleft++;
                if(ssleft >= ACT_STAT_TOTAL*0.6) {
                    sightState = STARE_LEFT;
                }
                break;
            case STARE_RIGHT:
                ssright++;
                if(ssright >= ACT_STAT_TOTAL*0.6) {
                    sightState = STARE_RIGHT;
                }

                break;
            case STARE_CENTER:
                sscenter++;
                if(sscenter >= ACT_STAT_TOTAL*0.6) {
                    sightState = STARE_CENTER;
                }
                break;
            default:
                break;
        }

        sstates[ssi] = sstate;
        ssi = (ssi+1)%ACT_STAT_TOTAL;


    }


    //tracking the pupil
    pulLeft.procPupil(legray);
    pulRight.procPupil(regray);



    pulLeft.pulCenter += lerect.tl();
    pulRight.pulCenter += rerect.tl();



    //Drawing

    int r = MAX(pulLeft.pulRect.height, pulRight.pulRect.height)/3;

    static int ani_r = 100;
    static int ani_speed = 5;


    if(ani_r > 0) {


        cv::circle(draw, pulLeft.pulCenter, ani_r, CV_RGB(255,0,0),1.5);
        cv::circle(draw, pulRight.pulCenter, ani_r, CV_RGB(255,0,0),1.5);
        ani_r -= ani_speed;
        ani_speed += 5;

    }
    else if( r > 0 ) {



        cv::circle(draw, pulLeft.pulCenter, r, CV_RGB(255,0,0), CV_FILLED);
        cv::circle(draw, pulLeft.pulCenter, r*3, CV_RGB(255,0,0), 1);

        cv::circle(draw, pulRight.pulCenter, r, CV_RGB(255,0,0), CV_FILLED);
        cv::circle(draw, pulRight.pulCenter, r*3, CV_RGB(255,0,0), 1);


    }

    cv::rectangle(draw, frect, CV_RGB(255,255,0));
    cv::rectangle(draw, lerect, CV_RGB(0,255,0));
    cv::rectangle(draw, rerect, CV_RGB(0,255,0));
}
Example No. 22
0
_Use_decl_annotations_
int WINAPI WinMain(HINSTANCE hInstance, HINSTANCE, LPSTR, int nCmdShow)
{
	D3D12nBodyGravity sample(1280, 720, L"D3D12 n-Body Gravity Simulation");
	return sample.Run(hInstance, nCmdShow);
}
Example No. 23
0
	// Sample a visible neuron, given a hidden neuron vector
	double sample_visible(rbm* r, unsigned int i, std::vector<int> hidden) {
	  return sample(visible_probability(r, i, hidden));
	}
Example No. 24
0
void PropertiesDialog::setFontSample(const QFont & f)
{
    fontSampleLabel->setFont(f);
    QString sample("%1 %2 pt");
    fontSampleLabel->setText(sample.arg(f.family()).arg(f.pointSize()));
}
Example No. 25
0
kernel void sample_y(global int *cur_y,
		     global int *cur_z,
		     global int *z_by_ry,
		     global int *cur_r,
		     global int *obs,
		     global float *rand, 
		     uint N, uint D, uint K, uint f_img_width,
		     float lambda, float epislon, float theta) {
  
  const uint V_SCALE = 0, H_SCALE = 1, V_TRANS = 2, H_TRANS = 3, NUM_TRANS = 4;

  uint kth = get_global_id(0); // k is the index of features
  uint dth = get_global_id(1); // d is the index of pixels

  uint f_img_height = D / f_img_width;

  // unpack dth into h and w
  uint h = dth / f_img_width;
  uint w = dth % f_img_width;

  // prior log-probabilities that this feature-image cell is on (theta) or off (1 - theta)
  float on_loglik_temp = log(theta); 
  float off_loglik_temp = log(1 - theta);

  int v_scale, h_scale, v_dist, h_dist, new_height, new_width, new_index, n, hh, ww;
  // extremely hackish way to calculate the loglikelihood
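  // presumably a noisy-OR observation model: an observed pixel is on with
  // probability 1 - (1 - lambda)^(# active transformed feature pixels) * (1 - epislon);
  // the two branches evaluate this with the current feature pixel on and off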
  for (n = 0; n < N; n++) {
    // if the nth object has the kth feature
    if (cur_z[n * K + kth] == 1) {
      // retrieve the transformation applied to this feature by this object
      v_scale = cur_r[n * (K * NUM_TRANS) + kth * NUM_TRANS + V_SCALE];
      h_scale = cur_r[n * (K * NUM_TRANS) + kth * NUM_TRANS + H_SCALE];
      v_dist = cur_r[n * (K * NUM_TRANS) + kth * NUM_TRANS + V_TRANS];
      h_dist = cur_r[n * (K * NUM_TRANS) + kth * NUM_TRANS + H_TRANS];
      new_height = f_img_height + v_scale;
      new_width = f_img_width + h_scale;

      // loop over all pixels
      for (hh = 0; hh < f_img_height; hh++) {
	for (ww = 0; ww < f_img_width; ww++) {
	  if ((int)round((float)hh / new_height * f_img_height) == h &&
	      (int)round((float)ww / new_width * f_img_width) == w) {
	    new_index = ((v_dist + hh) % f_img_height) * f_img_width + (h_dist + ww) % f_img_width;

	    // if the observed pixel at dth is on
	    if (obs[n * D + new_index] == 1) { // transformed feature affects the pixel at new_index not dth, cf., ibp
	      // if the feature image previously has this pixel on
	      if (cur_y[kth * D + dth] == 1) { // this is dth instead of new_index because we are referring to the original y
		on_loglik_temp += log(1 - pow(1 - lambda, z_by_ry[n * D + new_index]) * (1 - epislon));
		off_loglik_temp += log(1 - pow(1 - lambda, z_by_ry[n * D + new_index] - 1) * (1 - epislon));
	      } else {
		on_loglik_temp += log(1 - pow(1 - lambda, z_by_ry[n * D + new_index] + 1) * (1 - epislon));
		off_loglik_temp += log(1 - pow(1 - lambda, z_by_ry[n * D + new_index]) * (1 - epislon));
	      }
	    } else { // else obs[n * D + new_index] == 0
	      on_loglik_temp += log(1 - lambda);
	      off_loglik_temp += log(1.0f);
	    }
	  }
	}
      }
    }
  }
  float logpost[2] = {on_loglik_temp, off_loglik_temp};
  //printf("%f %f %d \n", logpost[0], logpost[1], cur_y[kth * D + dth]);
  uint labels[2] = {1, 0};
  lognormalize(logpost, 0, 2);
  cur_y[kth * D + dth] = sample(2, labels, logpost, 0, rand[kth * D + dth]);
  //printf("%f %f %d \n", logpost[0], logpost[1], cur_y[kth * D + dth]);
}
Example No. 26
0
dvec4 VolumeVectorSampler::sample(double x, double y, double z, double t) const {
    return sample(dvec4(x, y, z, t));
}
Example No. 27
0
_Use_decl_annotations_
int WINAPI WinMain(HINSTANCE hInstance, HINSTANCE, LPSTR, int nCmdShow)
{
	D3D12HelloBundles sample(1280, 720, L"D3D12 Hello Bundles");
	return Win32Application::Run(&sample, hInstance, nCmdShow);
}
Example No. 28
0
dvec4 VolumeVectorSampler::sample(const vec4 &pos) const { return sample(dvec4(pos)); }
Example No. 29
0
 vectord DiscreteModel::samplePoint()
 {   
   randInt sample(mEngine, intUniformDist(0,mInputSet.size()-1));
   return mInputSet[sample()];
 };
Example No. 30
0
pixel_t bilinear_sampler_t::sample( const Imath::V2d& p) const
{
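    // clamp the integer lattice point to the source area and pass the fractional
    // offsets to the (x, y, dx, dy) overload, which presumably does the bilinear blend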
    int x = std::max( std::min( IECore::fastFloatFloor( p.x), src_area_.max.x - 1), src_area_.min.x);
    int y = std::max( std::min( IECore::fastFloatFloor( p.y), src_area_.max.y - 1), src_area_.min.y);
    return sample( x, y, p.x - x, p.y - y);
}