Example no. 1
    Vec2d predict2(InputArray _sample, OutputArray _probs) const
    {
        int ptype = CV_64F;
        Mat sample = _sample.getMat();
        CV_Assert(isTrained());

        CV_Assert(!sample.empty());
        if(sample.type() != CV_64FC1)
        {
            Mat tmp;
            sample.convertTo(tmp, CV_64FC1);
            sample = tmp;
        }
        sample = sample.reshape(1, 1);

        Mat probs;
        if( _probs.needed() )
        {
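            // honor the type of a caller-provided output array; otherwise
            // fall back to CV_64F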
            if( _probs.fixedType() )
                ptype = _probs.type();
            _probs.create(1, nclusters, ptype);
            probs = _probs.getMat();
        }

        return computeProbabilities(sample, !probs.empty() ? &probs : 0, ptype);
    }
Example no. 2
bool MyFusion::train (ssi_size_t n_models,
	IModel **models,
	ISamples &samples) {

	if (samples.getSize () == 0) {
		ssi_wrn ("empty sample list");
		return false;
	}

	if (isTrained ()) {
		ssi_wrn ("already trained");
		return false;
	}

	ssi_size_t n_streams = samples.getStreamSize ();

	if (n_streams != n_models) {
		ssi_err ("#models (%u) differs from #streams (%u)", n_models, n_streams);
	}

	for (ssi_size_t n_model = 0; n_model < n_models; n_model++) {
		if (!models[n_model]->isTrained ()) {
			models[n_model]->train (samples, n_model);
		}
	}

	_is_trained = true;

	return true;
}
Example no. 3
void FunctionApproximatorGPR::train(const MatrixXd& inputs, const MatrixXd& targets)
{
  if (isTrained())  
  {
    cerr << "WARNING: You may not call FunctionApproximatorGPR::train more than once. Doing nothing." << endl;
    cerr << "   (if you really want to retrain, call reTrain function instead)" << endl;
    return;
  }
  
  assert(inputs.rows() == targets.rows());
  assert(inputs.cols()==getExpectedInputDim());

  const MetaParametersGPR* meta_parameters_gpr = 
    dynamic_cast<const MetaParametersGPR*>(getMetaParameters());
  // dynamic_cast returns NULL on a type mismatch; guard before dereferencing
  assert(meta_parameters_gpr!=NULL);

  double max_covar = meta_parameters_gpr->maximum_covariance();
  VectorXd sigmas = meta_parameters_gpr->sigmas();
  
  
  // Compute the gram matrix
  // In a gram matrix, every input point is itself a center
  MatrixXd centers = inputs;
  // Replicate sigmas, because they are the same for each data point/center
  MatrixXd widths = sigmas.transpose().colwise().replicate(centers.rows()); 

  MatrixXd gram(inputs.rows(),inputs.rows());
  bool normalize_activations = false;
  bool asymmetric_kernels = false;
  BasisFunction::Gaussian::activations(centers,widths,inputs,gram,normalize_activations,asymmetric_kernels);
  
  // scale the unit-height activations so the kernel's diagonal equals max_covar
  gram *= max_covar;

  setModelParameters(new ModelParametersGPR(inputs,targets,gram,max_covar,sigmas));
  
}
Example no. 4
void FunctionApproximatorGPR::predictVariance(const MatrixXd& inputs, MatrixXd& variances)
{
  if (!isTrained())  
  {
    cerr << "WARNING: You may not call FunctionApproximatorLWPR::predict if you have not trained yet. Doing nothing." << endl;
    return;
  }

  const ModelParametersGPR* model_parameters_gpr = static_cast<const ModelParametersGPR*>(getModelParameters());
  
  
  assert(inputs.cols()==getExpectedInputDim());
  
  unsigned int n_samples = inputs.rows();
  variances.resize(n_samples,1);
  
  MatrixXd ks;
  model_parameters_gpr->kernelActivations(inputs, ks);  

  double maximum_covariance = model_parameters_gpr->maximum_covariance();
  MatrixXd gram_inv = model_parameters_gpr->gram_inv();
  
  // GPR predictive variance: var(x*) = k(x*,x*) - k*^T K^{-1} k*,
  // where k(x*,x*) equals maximum_covariance for this kernel
  for (unsigned int ii=0; ii<n_samples; ii++)
    variances(ii) = maximum_covariance - (ks.row(ii)*gram_inv).dot(ks.row(ii).transpose());

}
Example no. 5
  void MZTrafoModel::getCoefficients( double& intercept, double& slope, double& power )
  {
    if (!isTrained()) throw Exception::Precondition(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Model is not trained yet.");

    intercept = coeff_[0];
    slope = coeff_[1];
    power = coeff_[2];
  }
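Example no. 6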
void FunctionApproximatorIRFRLS::predict(const MatrixXd& input, MatrixXd& output)
{
  if (!isTrained())  
  {
    cerr << "WARNING: You may not call FunctionApproximatorIRFRLS::predict if you have not trained yet. Doing nothing." << endl;
    return;
  }
  
  const ModelParametersIRFRLS* model_parameters_irfrls = static_cast<const ModelParametersIRFRLS*>(getModelParameters());

  MatrixXd proj_inputs;
  proj(input, model_parameters_irfrls->cosines_periodes_, model_parameters_irfrls->cosines_phase_, proj_inputs);
  output = proj_inputs * model_parameters_irfrls->linear_models_;
}
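Example no. 7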
void FunctionApproximatorIRFRLS::train(const MatrixXd& inputs, const MatrixXd& targets)
{
  if (isTrained())  
  {
    cerr << "WARNING: You may not call FunctionApproximatorIRFRLS::train more than once. Doing nothing." << endl;
    cerr << "   (if you really want to retrain, call reTrain function instead)" << endl;
    return;
  }
  
  assert(inputs.rows() == targets.rows()); // Must have same number of examples
  assert(inputs.cols()==getExpectedInputDim());
  
  const MetaParametersIRFRLS* meta_parameters_irfrls = 
    static_cast<const MetaParametersIRFRLS*>(getMetaParameters());

  int nb_cos = meta_parameters_irfrls->number_of_basis_functions_;
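  // IRFRLS builds random Fourier features: inputs are projected onto nb_cos
  // random cosines, and a ridge-regularized linear model is fit in that space.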

  // Init random generator.
  boost::mt19937 rng(getpid() + time(0));

  // Draw periods
  boost::normal_distribution<> twoGamma(0, sqrt(2 * meta_parameters_irfrls->gamma_));
  boost::variate_generator<boost::mt19937&, boost::normal_distribution<> > genPeriods(rng, twoGamma);
  MatrixXd cosines_periodes(nb_cos, inputs.cols());
  for (int r = 0; r < nb_cos; r++)
    for (int c = 0; c < inputs.cols(); c++)
      cosines_periodes(r, c) = genPeriods();

  // Draw phase
  boost::uniform_real<> twoPi(0, 2 * boost::math::constants::pi<double>());
  boost::variate_generator<boost::mt19937&, boost::uniform_real<> > genPhases(rng, twoPi);
  VectorXd cosines_phase(nb_cos);
  for (int r = 0; r < nb_cos; r++)
      cosines_phase(r) = genPhases();

  MatrixXd proj_inputs;
  proj(inputs, cosines_periodes, cosines_phase, proj_inputs);

  // Compute the linear model analytically (ridge regression normal equations):
  // w = (lambda*I + Phi^T Phi)^(-1) Phi^T y
  double lambda = meta_parameters_irfrls->lambda_;
  MatrixXd toInverse = lambda * MatrixXd::Identity(nb_cos, nb_cos) + proj_inputs.transpose() * proj_inputs;
  VectorXd linear_model = toInverse.inverse() *
    (proj_inputs.transpose() * targets);

  setModelParameters(new ModelParametersIRFRLS(linear_model, cosines_periodes, cosines_phase));
}
Example no. 8
bool MyModel::train (ISamples &samples,
	ssi_size_t stream_index) {

	if (samples.getSize () == 0) {
		ssi_wrn ("empty sample list");
		return false;
	}

	if (isTrained ()) {
		ssi_wrn ("already trained");
		return false;
	}

	_n_classes = samples.getClassSize ();
	_n_features = samples.getStream (stream_index).dim;
	_centers = new ssi_real_t *[_n_classes];
	for (ssi_size_t i = 0; i < _n_classes; i++) {
		_centers[i] = new ssi_real_t[_n_features];
		for (ssi_size_t j = 0; j < _n_features; j++) {
			_centers[i][j] = 0;
		}
	}

	ssi_sample_t *sample;	
	samples.reset ();
	ssi_real_t *ptr = 0;
	while ((sample = samples.next ()) != 0) {
		ssi_size_t id = sample->class_id;	
		ptr = ssi_pcast (ssi_real_t, sample->streams[stream_index]->ptr);
		for (ssi_size_t j = 0; j < _n_features; j++) {
			_centers[id][j] += ptr[j];
		}
	}	 

	for (ssi_size_t i = 0; i < _n_classes; i++) {
		ssi_size_t num = samples.getSize (i);
		if (num == 0) {
			continue; // avoid division by zero for classes without samples
		}
		for (ssi_size_t j = 0; j < _n_features; j++) {
			_centers[i][j] /= num;
		}
	}

	return true;
}
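
Since the centers computed above are per-class feature means, MyModel is effectively a nearest-centroid classifier. The following is a hedged sketch of a matching forward() (the inverse-distance scoring and its normalization are illustrative assumptions, not taken from the original source):

bool MyModel::forward (ssi_stream_t &stream,
	ssi_size_t n_probs,
	ssi_real_t *probs) {

	if (!isTrained ()) {
		ssi_wrn ("not trained");
		return false;
	}

	if (n_probs != _n_classes) {
		ssi_wrn ("#probs (%u) differs from #classes (%u)", n_probs, _n_classes);
		return false;
	}

	ssi_real_t *ptr = ssi_pcast (ssi_real_t, stream.ptr);

	// score each class by inverse squared distance to its center ...
	ssi_real_t sum = 0;
	for (ssi_size_t i = 0; i < _n_classes; i++) {
		ssi_real_t dist = 0;
		for (ssi_size_t j = 0; j < _n_features; j++) {
			ssi_real_t d = ptr[j] - _centers[i][j];
			dist += d * d;
		}
		probs[i] = 1.0f / (1.0f + dist);
		sum += probs[i];
	}

	// ... and normalize the scores to a probability distribution
	for (ssi_size_t i = 0; i < _n_classes; i++) {
		probs[i] /= sum;
	}

	return true;
}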
Example no. 9
void LayoutTest::testTrainer() {

	//cv::Mat testM(10, 10, CV_8UC1);
	//
	//for (int rIdx = 0; rIdx < testM.rows; rIdx++) {
	//	unsigned char* ptr = testM.ptr<unsigned char>(rIdx);
	//	for (int cIdx = 0; cIdx < testM.cols; cIdx++) {
	//		ptr[cIdx] = cIdx*rIdx+cIdx;
	//	}
	//}
	//
	//QJsonObject jo = Image::matToJson(testM);
	//cv::Mat t2 = Image::jsonToMat(jo);

	//cv::Scalar s = cv::sum(cv::abs(testM - t2));
	//if (s[0] != 0)
	//	qWarning() << "inconsistent json2Mat I/O";
	//else
	//	qInfo() << "json to mat is just fine...";

	
	Timer dt;
	FeatureCollectionManager fcm = FeatureCollectionManager::read(mConfig.featureCachePath());

	// train classifier
	SuperPixelTrainer spt(fcm);

	if (!spt.compute())
		qCritical() << "could not train data...";

	spt.write(mConfig.classifierPath());
	
	// read back the model
	QSharedPointer<SuperPixelModel> model = SuperPixelModel::read(mConfig.classifierPath());

	auto f = model->model();
	if (f->isTrained())
		qDebug() << "the classifier I loaded is trained...";
	
	//qDebug() << fcm.numFeatures() << "SuperPixels trained in" << dt;
}
Example no. 10
bool SimpleFusion::train (ssi_size_t n_models,
	IModel **models,
	ISamples &samples) {

	if (samples.getSize () == 0) {
		ssi_wrn ("empty sample list");
		return false;
	}

	if (isTrained ()) {
		ssi_wrn ("already trained");
		return false;
	}

	ssi_size_t n_streams = samples.getStreamSize ();

	if (n_streams != 1 && n_streams != n_models) {
		ssi_err ("#models (%u) differs from #streams (%u)", n_models, n_streams);
	}

	if (samples.hasMissingData ()) {
		ISMissingData samples_h (&samples);
		for (ssi_size_t n_model = 0; n_model < n_models; n_model++) {
			if (!models[n_model]->isTrained ()) {
				samples_h.setStream(n_streams == 1 ? 0 : n_model);
				models[n_model]->train (samples_h, n_model);
			}
		}
	} else {
		for (ssi_size_t n_model = 0; n_model < n_models; n_model++) {
			if (!models[n_model]->isTrained ()) {		
				models[n_model]->train(samples, n_streams == 1 ? 0 : n_model);
			}
		}
	}

	_is_trained = true;

	return true;
}
Example no. 11
bool MajorityVoting::train (ssi_size_t n_models,
	IModel **models,
	ISamples &samples) {

	if (samples.getSize () == 0) {
		ssi_wrn ("empty sample list");
		return false;
	}

	if (samples.getStreamSize () != n_models) {
		ssi_wrn ("#models (%u) differs from #streams (%u)", n_models, samples.getStreamSize ());
		return false;
	}

	if (isTrained ()) {
		ssi_wrn ("already trained");
		return false;
	} 

	_n_streams = samples.getStreamSize ();
	_n_classes = samples.getClassSize ();
	_n_models  = n_models;

	if (samples.hasMissingData ()) {
		ISMissingData samples_h (&samples);
		for (ssi_size_t n_model = 0; n_model < n_models; n_model++) {
			if (!models[n_model]->isTrained ()) {
				samples_h.setStream (n_model);
				models[n_model]->train (samples_h, n_model);
			}
		}
	}
	else{
		for (ssi_size_t n_model = 0; n_model < n_models; n_model++) {
			if (!models[n_model]->isTrained ()) { models[n_model]->train (samples, n_model); }
		}		
	}
	
	return true;
}
Example no. 12
bool SimpleKNN::train (ISamples &samples,
	ssi_size_t stream_index) {

	if (samples.getSize () == 0) {
		ssi_wrn ("empty sample list");
		return false;
	}

	if (samples.getSize () < _options.k) {
		ssi_wrn ("sample list has less than '%u' entries", _options.k);
		return false;
	}

	if (isTrained ()) {
		ssi_wrn ("already trained");
		return false;
	}

	_n_classes = samples.getClassSize ();
	_n_samples = samples.getSize ();	
	_n_features = samples.getStream (stream_index).dim;
	_data = new ssi_real_t[_n_features*_n_samples];
	_classes = new ssi_size_t[_n_samples];

	ssi_sample_t *sample;	
	samples.reset ();
	ssi_real_t *data_ptr = _data;
	ssi_size_t *class_ptr = _classes;
	ssi_size_t bytes_to_copy = _n_features * sizeof (ssi_real_t);
	while ((sample = samples.next ()) != 0) {
		memcpy (data_ptr, sample->streams[stream_index]->ptr, bytes_to_copy);
		*class_ptr++ = sample->class_id;
		data_ptr += _n_features;
	}	 

	return true;
}
Example no. 13
void FunctionApproximatorGPR::predict(const MatrixXd& inputs, MatrixXd& outputs)
{
  if (!isTrained())  
  {
    cerr << "WARNING: You may not call FunctionApproximatorLWPR::predict if you have not trained yet. Doing nothing." << endl;
    return;
  }

  const ModelParametersGPR* model_parameters_gpr = static_cast<const ModelParametersGPR*>(getModelParameters());
  
  assert(inputs.cols()==getExpectedInputDim());
  unsigned int n_samples = inputs.rows();
  
  outputs.resize(n_samples,1);
  
  MatrixXd ks;
  model_parameters_gpr->kernelActivations(inputs, ks);
  
  // weights() caches K^{-1} * targets, so the predictive mean is k*^T K^{-1} y
  VectorXd weights = model_parameters_gpr->weights();
  for (unsigned int ii=0; ii<n_samples; ii++)
    outputs(ii) = ks.row(ii).dot(weights);
  
}
Example no. 14
bool SVM::train (ISamples &samples,
	ssi_size_t stream_index) {

	if (_options.seed > 0) {
		srand(_options.seed);
	} else {
		srand(ssi_time_ms());
	}

	ISamples *s_balance = 0;
	switch (_options.balance) {
	case BALANCE::OFF: {
		s_balance = &samples;
		break;
	}
	case BALANCE::OVER: {		
		s_balance = new ISOverSample(&samples);
		ssi_pcast(ISOverSample, s_balance)->setOver(ISOverSample::RANDOM);
		ssi_msg(SSI_LOG_LEVEL_BASIC, "balance training set '%u' -> '%u'", samples.getSize(), s_balance->getSize());
		break;
	}
	case BALANCE::UNDER: {		
		s_balance = new ISUnderSample(&samples);
		ssi_pcast(ISUnderSample, s_balance)->setUnder(ISUnderSample::RANDOM);
		ssi_msg(SSI_LOG_LEVEL_BASIC, "balance training set '%u' -> '%u'", samples.getSize(), s_balance->getSize());
		break;
	}
	}

	_n_samples = s_balance->getSize();

	if (_n_samples == 0) {
		ssi_wrn ("empty sample list");
		return false;
	}

	if (isTrained ()) {
		ssi_wrn ("already trained");
		return false;
	}

	_n_classes = s_balance->getClassSize();
	_n_features = s_balance->getStream(stream_index).dim;
	ssi_size_t elements = _n_samples * (_n_features + 1);

	init_class_names(*s_balance);

	_problem = new svm_problem;
	_problem->l = ssi_cast (int, _n_samples);
	_problem->y = new double[_problem->l];
	_problem->x = new svm_node *[_problem->l];	

	s_balance->reset();
	ssi_sample_t *sample;
	int n_sample = 0;
	float *ptr = 0;
	svm_node *node = 0;
	while ((sample = s_balance->next()) != 0) {
		ptr = ssi_pcast (float, sample->streams[stream_index]->ptr);		
		_problem->x[n_sample] = new svm_node[_n_features + 1];
		_problem->y[n_sample] = ssi_cast (float, sample->class_id);
		node = _problem->x[n_sample];
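		// libsvm represents samples as sparse, 1-based (index, value) pairs
		// terminated by a sentinel node with index = -1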
		for (ssi_size_t nfeat = 0; nfeat < _n_features; nfeat++) {
			node->index = nfeat+1;
            node->value = *ptr;
            ptr++;
			++node;
		}
		node->index = -1;		
		++n_sample;
	}

	if(_options.params.gamma == 0 && _n_features > 0) {
		_options.params.gamma = 1.0 / _n_features;
	}
	
	if (_options.params.kernel_type == PRECOMPUTED) {
		int max_index = ssi_cast (int, _n_features);
		for (int i = 0; i < _problem->l; i++) {
			if (_problem->x[i][0].index != 0) {
				ssi_err ("wrong input format: first column must be 0:sample_serial_number");				
			}
			if ((int)_problem->x[i][0].value <= 0 || (int)_problem->x[i][0].value > max_index) {
				ssi_err ("wrong input format: sample_serial_number out of range");
			}
		}
	}
Example no. 15
void FunctionApproximator::train(const MatrixXd& inputs, const MatrixXd& targets, string save_directory, bool overwrite)
{
  train(inputs,targets);
  
  if (save_directory.empty())
    return;
  
  if (!isTrained())
    return;
  
  if (getExpectedInputDim()<3)
  {
    
    VectorXd min = inputs.colwise().minCoeff();
    VectorXd max = inputs.colwise().maxCoeff();
    
    int n_samples_per_dim = 100;
    if (getExpectedInputDim()==2) n_samples_per_dim = 40;
    VectorXi n_samples_per_dim_vec = VectorXi::Constant(getExpectedInputDim(),n_samples_per_dim);

    model_parameters_->saveGridData(min, max, n_samples_per_dim_vec, save_directory, overwrite);
    
  }

  MatrixXd outputs;
  predict(inputs,outputs);

  saveMatrix(save_directory,"inputs.txt",inputs,overwrite);
  saveMatrix(save_directory,"targets.txt",targets,overwrite);
  saveMatrix(save_directory,"outputs.txt",outputs,overwrite);
  
  string filename = save_directory+"/plotdata.py";
  ofstream outfile;
  outfile.open(filename.c_str()); 
  if (!outfile.is_open())
  {
    cerr << __FILE__ << ":" << __LINE__ << ":";
    cerr << "Could not open file " << filename << " for writing." << endl;
  } 
  else
  {
    // Python code generation in C++. Rock 'n' roll! ;-)
    if (inputs.cols()==2) {                                                                                           
      outfile << "from mpl_toolkits.mplot3d import Axes3D                                       \n";
    }
    outfile   << "import numpy                                                                  \n";
    outfile   << "import matplotlib.pyplot as plt                                               \n";
    outfile   << "directory = '" << save_directory << "'                                        \n";
    outfile   << "inputs   = numpy.loadtxt(directory+'/inputs.txt')                             \n";
    outfile   << "targets  = numpy.loadtxt(directory+'/targets.txt')                            \n";
    outfile   << "outputs  = numpy.loadtxt(directory+'/outputs.txt')                            \n";
    outfile   << "fig = plt.figure()                                                            \n";
    if (inputs.cols()==2) {                                                                                           
      outfile << "ax = Axes3D(fig)                                                              \n";
      outfile << "ax.plot(inputs[:,0],inputs[:,1],targets, '.', label='targets',color='black')  \n";
      outfile << "ax.plot(inputs[:,0],inputs[:,1],outputs, '.', label='predictions',color='red')\n";
      outfile << "ax.set_xlabel('input_1'); ax.set_ylabel('input_2'); ax.set_zlabel('output')   \n";
      outfile << "ax.legend(loc='lower right')                                                  \n";
    } else {                                                                                           
      outfile << "plt.plot(inputs,targets, '.', label='targets',color='black')                  \n";
      outfile << "plt.plot(inputs,outputs, '.', label='predictions',color='red')                \n";
      outfile << "plt.xlabel('input'); plt.ylabel('output');                                    \n";
      outfile << "plt.legend(loc='lower right')                                                 \n";
    }                                                                                           
    outfile   << "plt.show()                                                                    \n";
    outfile << endl;

    outfile.close();
    //cout << "        ______________________________________________________________" << endl;
    //cout << "        | Plot saved data with:" << " 'python " << filename << "'." << endl;
    //cout << "        |______________________________________________________________" << endl;
  }
  
}
Example no. 16
bool WeightedMajorityVoting::forward (ssi_size_t n_models,
	IModel **models,
	ssi_size_t n_streams,
	ssi_stream_t *streams[],
	ssi_size_t n_probs,
	ssi_real_t *probs) {

	if (!isTrained ()) {
		ssi_wrn ("not trained");
		return false;
	}

	if (n_streams != _n_streams) {
		ssi_wrn ("#streams (%u) differs from #streams (%u)", n_streams, _n_streams);
		return false;
	}

	if (_n_streams != n_models) {
		ssi_wrn ("#models (%u) differs from #streams (%u)", n_models, _n_streams);
		return false;
	}

	if (_n_classes != n_probs) {
		ssi_wrn ("#probs (%u) differs from #classes (%u)", n_probs ,_n_classes);
		return false;
	}

	bool found_data = false;

	IModel *model = 0;
	ssi_stream_t *stream = 0;

	//calculate actual models
	ssi_size_t miss_counter = 0;
	ssi_size_t *available = new ssi_size_t[n_models];
	for (ssi_size_t n_model = 0; n_model < n_models; n_model++) {
		stream = streams[n_model];
		if (stream->num > 0) {
			found_data = true;
			available[n_model] = 1;
		}
		else{
			miss_counter++;
			available[n_model] = 0;
		}
	}
	ssi_size_t counter = 0;
	ssi_size_t *models_actual = new ssi_size_t[(n_models - miss_counter)];
	for (ssi_size_t n_model = 0; n_model < n_models; n_model++) {
		if(available[n_model] == 1){
			models_actual[counter] = n_model;
			counter++;
		}
	}

	if(found_data){

		ssi_size_t *votes = new ssi_size_t[(n_models - miss_counter)];

		for (ssi_size_t n_model = 0; n_model < (n_models - miss_counter); n_model++) {
			model = models[models_actual[n_model]];
			stream = streams[models_actual[n_model]];
			model->forward (*stream, n_probs, probs);

			ssi_size_t max_ind = 0;
			ssi_real_t max_val = probs[0];
			
			for (ssi_size_t i = 1; i < n_probs; i++) {
				if (probs[i] > max_val) {
					max_val = probs[i];
					max_ind = i;
				}
			}
			votes[n_model] = max_ind;

			if (ssi_log_level >= SSI_LOG_LEVEL_DEBUG) {
				for (ssi_size_t num_probs = 0; num_probs < n_probs; num_probs++){
					ssi_print("%f ", probs[num_probs]);
				}
				ssi_print("- vote: %d\n", max_ind);
			}

		}

		//clear probs
		for (ssi_size_t num_probs = 0; num_probs < n_probs; num_probs++){
			probs[num_probs] = 0;
		}

		//fill probs with votes
		ssi_char_t weighting_method = 'a';
		//a = average
		//c = class
		for(ssi_size_t n_model = 0; n_model < (n_models - miss_counter); n_model++){
			switch (weighting_method)
			{
			case ('a'):
				probs[votes[n_model]] = (probs[votes[n_model]])+(_weights[models_actual[n_model]][_n_classes]);
				break;
			case ('c'):
				probs[votes[n_model]] = (probs[votes[n_model]])+(_weights[models_actual[n_model]][votes[n_model]]);
				break;
			}
		}

		if (ssi_log_level >= SSI_LOG_LEVEL_DEBUG) {
			ssi_print("\n");
		}

		if(votes){
			delete[] votes;
			votes = 0;
		}
	}

	if(available){
		delete [] available;
		available = 0;
	}
	if(models_actual){
		delete [] models_actual;
		models_actual = 0;
	}
		
	/// is there a draw ? ///
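	// the tie only stands if the last value equal to the maximum is the
	// maximum itself; a later, strictly larger value resolves the draw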
	ssi_size_t max_ind = 0;
	ssi_size_t max_ind_draw = 0;
	ssi_real_t max_val = probs[0];
	bool draw = false;

	for (ssi_size_t i = 1; i < n_probs; i++) {
		if (probs[i] >= max_val) {
			if(probs[i] == max_val){
				draw = true;
				max_ind_draw = i;
			}
			max_val = probs[i];
			max_ind = i;
		}
	}
	
	if(draw && (max_ind == max_ind_draw)){
		return false;
	}else{
		return found_data;
	}
}
Example no. 17
bool StatModel::empty() const { return !isTrained(); }
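
For context, a minimal usage sketch (the SVM model type is illustrative; any cv::ml::StatModel behaves the same): empty() mirrors !isTrained(), so either test can guard predict() on a model that has not been trained or loaded yet.

#include <opencv2/ml.hpp>

int main()
{
    cv::Ptr<cv::ml::SVM> svm = cv::ml::SVM::create();
    CV_Assert(svm->empty());    // freshly created model: not trained yet
    // ... after a successful svm->train(...), svm->empty() returns false
    // and predict() becomes safe to call
    return 0;
}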
Example no. 18
bool WeightedMajorityVoting::train (ssi_size_t n_models,
	IModel **models,
	ISamples &samples) {

	if (samples.getSize () == 0) {
		ssi_wrn ("empty sample list");
		return false;
	}

	if (samples.getStreamSize () != n_models) {
		ssi_wrn ("#models (%u) differs from #streams (%u)", n_models, samples.getStreamSize ());
		return false;
	}

	if (isTrained ()) {
		ssi_wrn ("already trained");
		return false;
	}  

	_n_streams = samples.getStreamSize ();
	_n_classes = samples.getClassSize ();
	_n_models  = n_models;

	_weights = new ssi_real_t*[n_models];
	for (ssi_size_t n_model = 0; n_model < n_models; n_model++) {
		_weights[n_model] = new ssi_real_t[_n_classes+1];		
	}

	if (samples.hasMissingData ()) {
		ISMissingData samples_h (&samples);
		Evaluation eval;
		for (ssi_size_t n_model = 0; n_model < n_models; n_model++) {
			if (!models[n_model]->isTrained ()) {
				samples_h.setStream (n_model);
				models[n_model]->train (samples_h, n_model);
			}
			eval.eval (*models[n_model], samples_h, n_model);
			for (ssi_size_t n_class = 0; n_class < _n_classes; n_class++) {
				_weights[n_model][n_class] = eval.get_class_prob (n_class);
			}		
			_weights[n_model][_n_classes] = eval.get_classwise_prob ();	
		}
	}
	else{
		Evaluation eval;
		for (ssi_size_t n_model = 0; n_model < n_models; n_model++) {
			if (!models[n_model]->isTrained ()) { models[n_model]->train (samples, n_model); }
			eval.eval (*models[n_model], samples, n_model);
			for (ssi_size_t n_class = 0; n_class < _n_classes; n_class++) {
				_weights[n_model][n_class] = eval.get_class_prob (n_class);
			}		
			_weights[n_model][_n_classes] = eval.get_classwise_prob ();
		}		
	}

	if (ssi_log_level >= SSI_LOG_LEVEL_DEBUG) {
		ssi_print("\nClassifier Weights: \n");
		for (ssi_size_t n_model = 0; n_model < n_models; n_model++) {
			for (ssi_size_t n_class = 0; n_class < _n_classes; n_class++) {
				ssi_print ("%f ", _weights[n_model][n_class]);
			}
			ssi_print ("%f\n", _weights[n_model][_n_classes]);
		}
	}

	return true;
}
Example no. 19
bool FeatureFusion::train (ssi_size_t n_models,
	IModel **models,
	ISamples &samples) {

	if (samples.getSize () == 0) {
		ssi_wrn ("empty sample list");
		return false;
	}

	if (isTrained ()) {
		ssi_wrn ("already trained");
		return false;
	}

	_n_streams = samples.getStreamSize ();
	_n_classes = samples.getClassSize ();
	_n_models  = n_models;

	//initialize weights
	ssi_real_t **weights = new ssi_real_t*[n_models];
	for (ssi_size_t n_model = 0; n_model < n_models; n_model++) {
		weights[n_model] = new ssi_real_t[_n_classes+1];		
	}

	if (samples.hasMissingData ()) {

		_handle_md = true;

		ISMissingData samples_h (&samples);
		Evaluation eval;
		
		if (ssi_log_level >= SSI_LOG_LEVEL_DEBUG) {
			ssi_print("\nMissing data detected.\n");
		}
		
		//models[0] is featfuse_model, followed by singlechannel_models
		ISMergeDim ffusionSamples (&samples);
		ISMissingData ffusionSamples_h (&ffusionSamples);
		ffusionSamples_h.setStream(0);
		if (!models[0]->isTrained ()) { models[0]->train (ffusionSamples_h, 0); }

		if (ssi_log_level >= SSI_LOG_LEVEL_DEBUG) {
			eval.eval (*models[0], ffusionSamples_h, 0);
			eval.print();
		}
		//dummy weights for fused model
		for (ssi_size_t n_class = 0; n_class < _n_classes; n_class++) {
			weights[0][n_class] = 0.0f;
		}		
		weights[0][_n_classes] = 0.0f;	
		
		for (ssi_size_t n_model = 1; n_model < n_models; n_model++) {
			
			if (!models[n_model]->isTrained ()) {
				samples_h.setStream (n_model - 1);
				models[n_model]->train (samples_h, n_model - 1);
			}

			eval.eval (*models[n_model], samples_h, n_model - 1);

			if (ssi_log_level >= SSI_LOG_LEVEL_DEBUG) {
				eval.print();
			}

			for (ssi_size_t n_class = 0; n_class < _n_classes; n_class++) {
				weights[n_model][n_class] = eval.get_class_prob (n_class);
			}		
			weights[n_model][_n_classes] = eval.get_classwise_prob ();	
		}

		//calculate fillers
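		// greedy ranking: repeatedly pick the single-channel model with the
		// highest remaining classwise accuracy and zero its weight, so each
		// filler slot receives a distinct model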
		_filler = new ssi_size_t[_n_streams];
		for (ssi_size_t n_fill = 0; n_fill < _n_streams; n_fill++) {
			_filler[n_fill] = 1;
			ssi_real_t filler_weight = weights[1][_n_classes];
			for (ssi_size_t n_model = 2; n_model < n_models; n_model++) {
				if (filler_weight < weights[n_model][_n_classes]) {
					_filler[n_fill] = n_model;
					filler_weight = weights[n_model][_n_classes];
				}
			}
			weights[_filler[n_fill]][_n_classes] = 0.0f;
		}
		if (ssi_log_level >= SSI_LOG_LEVEL_DEBUG) {
			ssi_print("\nfiller:\n");
			for (ssi_size_t n_model = 0; n_model < _n_streams; n_model++) {
				ssi_print("%d ", _filler[n_model]);
			}ssi_print("\n");
		}
	
	}
	else{

		_handle_md = false;

		if (ssi_log_level >= SSI_LOG_LEVEL_DEBUG) {
			ssi_print("\nNo missing data detected.\n");
		}
		ISMergeDim ffusionSamples (&samples);
		if (!models[0]->isTrained ()) { models[0]->train (ffusionSamples, 0); }
		//dummy
		_filler = new ssi_size_t[_n_streams];
		for (ssi_size_t n_fill = 0; n_fill < _n_streams; n_fill++) {
			_filler[n_fill] = 0;
		}
	}

	if (weights) {
		for (ssi_size_t n_model = 0; n_model < _n_models; n_model++) {
			delete[] weights[n_model];
		}
		delete[] weights;
		weights = 0;
	}

	return true;
}
Example no. 20
bool FeatureFusion::forward (ssi_size_t n_models,
	IModel **models,
	ssi_size_t n_streams,
	ssi_stream_t *streams[],
	ssi_size_t n_probs,
	ssi_real_t *probs) {

	if (!isTrained ()) {
		ssi_wrn ("not trained");
		return false;
	}

	if (n_streams != _n_streams) {
		ssi_wrn ("#streams (%u) differs from #streams (%u)", n_streams, _n_streams);
		return false;
	}

	if (_n_models != n_models) {
		ssi_wrn ("#models (%u) differs from #models (%u)", n_models, _n_models);
		return false;
	}

	if (_n_classes != n_probs) {
		ssi_wrn ("#probs (%u) differs from #classes (%u)", n_probs ,_n_classes);
		return false;
	}

	//No Missing Data:
	if(!_handle_md){

		IModel *model = 0;
		ssi_stream_t *stream = 0;

		model = models[0];

		ssi_stream_t *fusion_stream = new ssi_stream_t;

		ssi_size_t fusion_stream_dim = 0;
		for(ssi_size_t nstrm = 0; nstrm < _n_streams; nstrm++){
			fusion_stream_dim += streams[nstrm]->dim;
		}

		//create aligned streams
		ssi_stream_init (*fusion_stream, 1, fusion_stream_dim, streams[0]->byte, streams[0]->type, streams[0]->sr);
		
		ssi_byte_t *ptr = fusion_stream->ptr;
		for(ssi_size_t i = 0; i < _n_streams; i++){
			memcpy(ptr, streams[i]->ptr, ( streams[i]->byte * streams[i]->dim ) );
			ptr += ( streams[i]->byte * streams[i]->dim );
		}

		//clear probs
		for (ssi_size_t num_probs = 0; num_probs < n_probs; num_probs++){
			probs[num_probs] = 0.0f;
		}

		model->forward (*fusion_stream, n_probs, probs);

		ssi_stream_destroy(*fusion_stream);
		delete fusion_stream;
		fusion_stream = 0;

		/// is there a draw ? ///
		ssi_size_t max_ind = 0;
		ssi_size_t max_ind_draw = 0;
		ssi_real_t max_val = probs[0];
		bool draw = false;

		for (ssi_size_t i = 1; i < n_probs; i++) {
			if (probs[i] >= max_val) {
				if(probs[i] == max_val){
					draw = true;
					max_ind_draw = i;
				}
				max_val = probs[i];
				max_ind = i;
			}
		}
		
		if(draw && (max_ind == max_ind_draw)){
			return false;
		}else{
			return true;
		}

	}//No Missing Data


	//Missing Data:
	bool found_data = false;

	IModel *model = 0;
	ssi_stream_t *stream = 0;

	//calculate actual models
	ssi_size_t miss_counter = 0;
	ssi_size_t *available = new ssi_size_t[n_models];
	available[0] = 1;
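	// models[0] is the joint feature-fusion model; it needs every stream,
	// so the first missing stream also marks slot 0 as unavailable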
	for (ssi_size_t n_model = 1; n_model < _n_models; n_model++) {
		stream = streams[n_model - 1];
		if (stream->num > 0) {
			found_data = true;
			available[n_model] = 1;
		}
		else{
			miss_counter++;
			available[n_model] = 0;
			if(available[0] == 1){
				available[0] = 0;
				miss_counter++;
			}
		}
	}
	ssi_size_t counter = 0;
	ssi_size_t *models_actual = new ssi_size_t[(n_models - miss_counter)];
	for (ssi_size_t n_model = 0; n_model < n_models; n_model++) {
		if(available[n_model] == 1){
			models_actual[counter] = n_model;
			counter++;
		}
	}

	if (ssi_log_level >= SSI_LOG_LEVEL_DEBUG) {
		ssi_print("\n\n-----------------------------\navailable models:\n");
		for(ssi_size_t i = 0; i < (n_models - miss_counter); i++){
			ssi_print("%d ", models_actual[i]);
		}ssi_print("\n");
	}

	if(found_data){

		if(available[0] == 1){
			//feature fusion possible
			if (ssi_log_level >= SSI_LOG_LEVEL_DEBUG) {
				ssi_print("\nfeature fusion possible\n");
			}

			model = models[0];
			stream = 0;
			ssi_stream_t *fusion_stream = new ssi_stream_t;

			ssi_size_t fusion_stream_dim = 0;
			for(ssi_size_t nstrm = 0; nstrm < _n_streams; nstrm++){
				fusion_stream_dim += streams[nstrm]->dim;
			}

			//create aligned streams
			ssi_stream_init (*fusion_stream, 1, fusion_stream_dim, streams[0]->byte, streams[0]->type, streams[0]->sr);
			
			ssi_byte_t *ptr = fusion_stream->ptr;
			for(ssi_size_t i = 0; i < _n_streams; i++){
				memcpy(ptr, streams[i]->ptr, ( streams[i]->byte * streams[i]->dim ) );
				ptr += ( streams[i]->byte * streams[i]->dim );
			}

			//clear probs
			for (ssi_size_t num_probs = 0; num_probs < n_probs; num_probs++){
				probs[num_probs] = 0.0f;
			}

			model->forward (*fusion_stream, n_probs, probs);

			ssi_stream_destroy(*fusion_stream);
			delete fusion_stream;
			fusion_stream = 0;

			if(available){
				delete [] available;
				available = 0;
			}
			if(models_actual){
				delete [] models_actual;
				models_actual = 0;
			}

			return true;

		}else{
			//feature fusion not possible, choose filler ...
			if (ssi_log_level >= SSI_LOG_LEVEL_DEBUG) {
				ssi_print("\nfeature fusion not possible: filler needed\n");
				ssi_print("\nfiller:\n");
				for (ssi_size_t n_model = 0; n_model < _n_streams; n_model++) {
					ssi_print("%d ", _filler[n_model]);
				}ssi_print("\n");
			}

			bool model_available = false;
			ssi_size_t model_id = 0;
			for(ssi_size_t h = 0; h < _n_streams; h++){
				model_id = _filler[h];
				for(ssi_size_t i = 0; i < (n_models - miss_counter); i++){
					if(model_id == models_actual[i]){
						model_available = true;
						break;
					}
				}
				if(model_available == true){
					model = models[model_id];
					if (ssi_log_level >= SSI_LOG_LEVEL_DEBUG) {
						ssi_print("\nSelected Model: %d", model_id);
					}
					break;
				}
			}

			model->forward(*streams[model_id - 1], n_probs, probs);

		}

	}

	if(available){
		delete [] available;
		available = 0;
	}
	if(models_actual){
		delete [] models_actual;
		models_actual = 0;
	}
	

	/// is there a draw ? ///
	ssi_size_t max_ind = 0;
	ssi_size_t max_ind_draw = 0;
	ssi_real_t max_val = probs[0];
	bool draw = false;

	for (ssi_size_t i = 1; i < n_probs; i++) {
		if (probs[i] >= max_val) {
			if(probs[i] == max_val){
				draw = true;
				max_ind_draw = i;
			}
			max_val = probs[i];
			max_ind = i;
		}
	}
	
	if(draw && (max_ind == max_ind_draw)){
		return false;
	}else{
		return found_data;
	}
}