Code example #1
File: HataSRD.cpp Project: loxorolim/SirisOnRails
// Hata-style path-loss model in dB (per the usual Hata conventions: f presumably in MHz, antenna heights in m, d in km).
double loss(double f, double h_tx, double h_rx, double d, int environment, int SRD)
{

	double path_loss = 0;

	double alpha = 1;

	double H_m = fmin(h_tx, h_rx);
	double H_b = fmax(h_tx, h_rx);

	double a = (1.1*log10(f) - 0.7)*fmin(10, H_m) - (1.56*log10(f) - 0.8) + fmax(0, 20 * log10(H_m / 10));
	double b = 0;
	if (SRD)
		b = (((1.1*log10(f)) - 0.7)*fmin(10, H_b)) - (((1.56*log10(f)) - 0.8)) + fmax(0, (20 * log10(H_b / 10)));
	else
		b = 0.20*log10(H_b/30);


	switch (environment)
	{
		case Urbano:

	//		if ((30 <= f) && (f <= 150))
	//			path_loss = 8 + 69.9 + 26.2*log10(150) - 20 * log10(150 / f) - 13.82*log10(fmax(30, H_b)) + (44.9 - 6.55*log10(fmax(30, H_b)))*(pow(log10(d), alpha)) - a - b;
	//		if ((150 <  f) && (f <= 1500))
	//			path_loss = 77.6 + 26.2*log10(f) - 13.82*log10(fmax(30, H_b)) + (44.9 - 6.55*log10(fmax(30, H_b)))*(pow(log10(d), alpha)) - a - b;
	//
	//		if ((1500 < f) && (f <= 2000))
	//			path_loss = 54.3 + 33.9*log10(f) - 13.82*log10(fmax(30, H_b)) + (44.9 - 6.55*log10(fmax(30, H_b)))*(pow(log10(d), alpha)) - a - b;

			if ((2000 < f) && (f <= 3000))
			{
				double r2 = pow(d,2);
				double r3 = pow(H_b - H_m,2)/1000000;
				double r1 = log10(r2 + r3);
				path_loss = 34.2 + 20*log10(f) + 10*r1 + 30;
			}
			break;

		case Suburbano:
			path_loss =  0.98*loss( f , h_tx , h_rx , d, Urbano, SRD);
			break;

		case Rural:
			path_loss =  0.96*loss( f , h_tx , h_rx , d, Urbano, SRD);
			break;

		default:
			break;
	}
	return path_loss;
}
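For context, a minimal call sketch of the function above (the values are illustrative, and the environment constant comes from the project's own enum):

// Illustrative only: 2.4 GHz SRD link, 1.5 m and 30 m antennas, 0.5 km apart.
// Urbano is the project's environment enum constant; f=2400 exercises the
// 2000 < f <= 3000 branch shown above.
double path_loss_db = loss(2400.0, 1.5, 30.0, 0.5, Urbano, /*SRD=*/1);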
Code example #2
TYPED_TEST(TripletLossLayerTest, TestForward) {
  typedef typename TypeParam::Dtype Dtype;
  LayerParameter layer_param;
  TripletLossLayer<Dtype> layer(layer_param);
  layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
  layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
  // manually compute to compare
  const Dtype margin = layer_param.triplet_loss_param().margin();
  const int num = this->blob_bottom_data_i_->num();
  const int channels = this->blob_bottom_data_i_->channels();
  Dtype loss(0);
  for (int i = 0; i < num; ++i) {
    Dtype dist_sq_a_p_(0);
    Dtype dist_sq_a_n_(0);
    for (int j = 0; j < channels; ++j) {
      Dtype diff_a_p_ = this->blob_bottom_data_i_->cpu_data()[i*channels+j] -
          this->blob_bottom_data_j_->cpu_data()[i*channels+j];
      Dtype diff_a_n_ = this->blob_bottom_data_i_->cpu_data()[i*channels+j] -
          this->blob_bottom_data_k_->cpu_data()[i*channels+j];
      dist_sq_a_p_ += diff_a_p_*diff_a_p_;
      dist_sq_a_n_ += diff_a_n_*diff_a_n_;
    }
    loss += std::max<Dtype>(dist_sq_a_p_ - dist_sq_a_n_ + margin, Dtype(0.0));
  }
  loss /= static_cast<Dtype>(num);
  EXPECT_NEAR(this->blob_top_loss_->cpu_data()[0], loss, 1e-6);
}
Code example #3
TYPED_TEST(TripletClsLossLayerTest, TestForward) {
  typedef typename TypeParam::Dtype Dtype;
  LayerParameter layer_param;
  TripletClsLossLayer<Dtype> layer(layer_param);
  layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
  layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
  const int num = this->blob_bottom_data_i_->num();
  const int channels = this->blob_bottom_data_i_->channels();
  Dtype loss(0);
  for (int i = 0; i < num; ++i) {
    Dtype dist_sq_pos(0); Dtype dist_sq_neg(0);
    for (int j = 0; j < channels; ++j) {
      Dtype diff = this->blob_bottom_data_i_->cpu_data()[i*channels+j] -
          this->blob_bottom_data_j_->cpu_data()[i*channels+j];
      dist_sq_pos += diff*diff;
    }
    for (int j = 0; j < channels; ++j) {
      Dtype diff = this->blob_bottom_data_i_->cpu_data()[i*channels+j] -
          this->blob_bottom_data_k_->cpu_data()[i*channels+j];
      dist_sq_neg += diff*diff;
    }
    loss += std::max(Dtype(0.0), dist_sq_pos - dist_sq_neg + 1);
  }

  loss /= static_cast<Dtype>(2.*num);
  EXPECT_NEAR(this->blob_top_loss_->cpu_data()[0], loss, 1e-6);
}
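A design note on the two tests above: example #2 reads the margin from triplet_loss_param().margin() and averages over num, while this one hard-codes a margin of 1 inside std::max and divides by 2*num. A manual reference computation in a test has to mirror the layer's own margin and normalization exactly, or the EXPECT_NEAR comparison fails.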
Code example #4
File: triplet_loss_layer.cpp Project: yuxng/caffe
void TripletLossLayer<Dtype>::Forward_cpu(
    const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  int count = bottom[0]->count();
  caffe_sub(
      count,
      bottom[0]->cpu_data(),  // a
      bottom[1]->cpu_data(),  // b
      diff_pos.mutable_cpu_data());  // a_i-b_i
  caffe_sub(
      count,
      bottom[0]->cpu_data(),  // a
      bottom[2]->cpu_data(),  // c
      diff_neg.mutable_cpu_data());  // a_i-c_i
  const int channels = bottom[0]->channels();
  Dtype margin = this->layer_param_.triplet_loss_param().margin();
  Dtype loss(0.0);
  // Loss component calculated from ab
  for (int i = 0; i < bottom[0]->num(); ++i) {
    dist_sq_pos.mutable_cpu_data()[i] = caffe_cpu_dot(channels,
        diff_pos.cpu_data() + (i*channels), diff_pos.cpu_data() + (i*channels));
    // ab is a similar pair
    dist_sq_.mutable_cpu_data()[i] = dist_sq_pos.cpu_data()[i];
    // Loss component calculated from ac
    dist_sq_neg.mutable_cpu_data()[i] = caffe_cpu_dot(channels,
        diff_neg.cpu_data() + (i*channels), diff_neg.cpu_data() + (i*channels));
    // ac is a dissimilar pair
    dist_sq_.mutable_cpu_data()[i] -= dist_sq_neg.cpu_data()[i];
    loss += std::max(margin + dist_sq_.cpu_data()[i], Dtype(0.0));
  }
  loss = loss / static_cast<Dtype>(bottom[0]->num()) / Dtype(2);
  top[0]->mutable_cpu_data()[0] = loss;
}
Code example #5
void ContrastiveLossLayer<Dtype>::Forward_cpu(
    const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  int count = bottom[0]->count();
  caffe_sub(
      count,
      bottom[0]->cpu_data(),  // a
      bottom[1]->cpu_data(),  // b
      diff_.mutable_cpu_data());  // a_i-b_i
  const int channels = bottom[0]->channels();
  Dtype margin = this->layer_param_.contrastive_loss_param().margin();
  bool legacy_version =
      this->layer_param_.contrastive_loss_param().legacy_version();
  Dtype loss(0.0);
  for (int i = 0; i < bottom[0]->num(); ++i) {
    dist_sq_.mutable_cpu_data()[i] = caffe_cpu_dot(channels,
        diff_.cpu_data() + (i*channels), diff_.cpu_data() + (i*channels));
    if (static_cast<int>(bottom[2]->cpu_data()[i])) {  // similar pairs
      loss += dist_sq_.cpu_data()[i];
    } else {  // dissimilar pairs
      if (legacy_version) {
        loss += std::max(margin - dist_sq_.cpu_data()[i], Dtype(0.0));
      } else {
        Dtype dist = std::max(margin - sqrt(dist_sq_.cpu_data()[i]), Dtype(0.0));
        loss += dist*dist;
      }
    }
  }
  loss = loss / static_cast<Dtype>(bottom[0]->num()) / Dtype(2);
  top[0]->mutable_cpu_data()[0] = loss;
}
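The loop above implements the standard contrastive loss: similar pairs contribute their squared distance, dissimilar pairs a hinged term. A minimal self-contained sketch of the per-pair quantity (plain C++, no Caffe types; the names are illustrative):

#include <algorithm>
#include <cmath>

// d2 = squared distance ||a-b||^2 for one pair.
double contrastive_pair(double d2, bool similar, double margin, bool legacy) {
  if (similar) return d2;                            // similar pair: pull together
  if (legacy)  return std::max(margin - d2, 0.0);    // legacy: hinge on the squared distance
  double d = std::max(margin - std::sqrt(d2), 0.0);  // current: hinge on the distance itself
  return d * d;
}

The layer then averages these per-pair values over the batch and halves them, matching the final loss / num / 2 above.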
Code example #6
File: svm_struct_learn.cpp Project: aa755/cfg3d
CCACHE *create_constraint_cache(SAMPLE sample, STRUCT_LEARN_PARM *sparm, 
				STRUCTMODEL *sm)
     /* create new constraint cache for training set */
{
  long        n=sample.n;
  EXAMPLE     *ex=sample.examples;
  CCACHE      *ccache;
  int         i;

  ccache=(CCACHE *)my_malloc(sizeof(CCACHE));
  ccache->n=n;
  ccache->sm=sm;
  ccache->constlist=(CCACHEELEM **)my_malloc(sizeof(CCACHEELEM *)*n);
  ccache->avg_viol_gain=(double *)my_malloc(sizeof(double)*n);
  ccache->changed=(int *)my_malloc(sizeof(int)*n);
  for(i=0;i<n;i++) { 
    /* add constraint for ybar=y to cache */
    ccache->constlist[i]=(CCACHEELEM *)my_malloc(sizeof(CCACHEELEM));
    ccache->constlist[i]->fydelta=create_svector_n(NULL,0,NULL,1);
    ccache->constlist[i]->rhs=loss(ex[i].y,ex[i].y,sparm)/n;
    ccache->constlist[i]->viol=0;
    ccache->constlist[i]->next=NULL;
    ccache->avg_viol_gain[i]=0;
    ccache->changed[i]=0;
  }
  return(ccache);
}
Code example #7
File: MCMC.cpp Project: rfok/MCMC
void MCMC::entropyPlot(double norm, string fileName, string fileOut){
    
    vector<double> convergence;
    fstream file;
    file.open(fileName.c_str(), ios::in);
    if (file.is_open()){
    	int step = 0;
		while(file.good()){
    		double x;
    		++step;
    		file >> x;
			gsl_histogram_increment(h,x);	
 				
 			double conv = 0;
 			for(int i =0; i < bin_num; i++){
 				double range_avg = (h->range[i] + h->range[i+1])/2.0;
 				Point param = Point(DIMENSION,range_avg);  //this might need to change in multidimensions
 														   // i.e. make a point object constructor Point(int, vector)
 														   
 				double f1 = posterior_distribution(param)/norm;
 				double f2 = h->bin[i]/((h->range[i+1] - h->range[i])*(step/thin_factor));
 				conv += loss(f1,f2);
 			}
 			convergence.push_back(conv);
    	}
    }
    file.close();
}
Code example #8
void MaxMarginLossLayer<Dtype>::Forward_cpu(
		const vector<Blob<Dtype>*>& bottom,
		const vector<Blob<Dtype>*>& top) {
	int count = bottom[0]->count();
	caffe_sub(
			count,
			bottom[0]->cpu_data(),  // a
			bottom[1]->cpu_data(),  // b
			diff_.mutable_cpu_data());  // a_i-b_i
	const int channels = bottom[0]->channels();
	Dtype margin = this->layer_param_.max_margin_loss_param().margin();
	Dtype b = 2;  // decision-boundary offset; assumed value, since b is undeclared in the original snippet (example #11 uses the same constant)
	Dtype loss(0.0);
	for (int i = 0; i < bottom[0]->num(); ++i) {
		dist_sq_.mutable_cpu_data()[i] = caffe_cpu_dot(channels,
				diff_.cpu_data() + (i*channels), diff_.cpu_data() + (i*channels));
		Dtype yij;
		if (static_cast<int>(bottom[2]->cpu_data()[i])) {  // similar pairs
			yij = 1.0;
		}
		else {  // dissimilar pairs
			yij = -1.0;
		}
		loss += std::max(margin - yij * (b - dist_sq_.cpu_data()[i]), Dtype(0.0));
	}
	// average
	loss = loss / static_cast<Dtype>(bottom[0]->num());
	top[0]->mutable_cpu_data()[0] = loss;
}
Code example #9
TYPED_TEST(TripletLossLayerTest, TestForward) {
  typedef typename TypeParam::Dtype Dtype;
  LayerParameter layer_param;
  TripletLossLayer<Dtype> layer(layer_param);
  layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
  layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
  // manually compute to compare
  const Dtype margin = layer_param.contrastive_loss_param().margin();
  const int num = this->blob_bottom_data_i_->num();
  const int channels = this->blob_bottom_data_i_->channels();
  Dtype loss(0);
  for (int i = 0; i < num; ++i) {
    Dtype dist_sq(0);
    for (int j = 0; j < channels; ++j) {
      Dtype diff = this->blob_bottom_data_i_->cpu_data()[i*channels+j] -
          this->blob_bottom_data_j_->cpu_data()[i*channels+j];
      dist_sq += diff*diff;
    }
    if (this->blob_bottom_y_->cpu_data()[i]) {  // similar pairs
      loss += dist_sq;
    } else {
      loss += std::max(margin-dist_sq, Dtype(0));
    }
  }
  loss /= static_cast<Dtype>(num) * Dtype(2);
  EXPECT_NEAR(this->blob_top_loss_->cpu_data()[0], loss, 1e-6);
}
Code example #10
File: RAE.cpp Project: zerkh/RAE
lbfgsfloatval_t RAE::_training(lbfgsfloatval_t* g)
{
	lbfgsfloatval_t error = 0;

	for(map<string, int>::iterator it = trainingData.begin(); it != trainingData.end(); it++)
	{
		// fetch this training instance
		buildTree(it->first);	

		error += loss();

		MatrixLBFGS delta_parent = MatrixLBFGS(1, vecSize);
		delta_parent.setZero();

		// take the derivative for the RAE (backpropagate the reconstruction error)
		trainRecError(RAETree->root, delta_parent, it->second);

		delete RAETree;
		RAETree = NULL;
	}

	update(g);

	return error;
}
Code example #11
void TripletRankingHingeLossLayer<Dtype>::Forward_cpu(
	const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top){
	int dim_v = batch_*dim_;
	const Dtype* sub_or_si;
	const Dtype* sub_or_di;
	Dtype b = 2;
	Dtype Tripletlosstotal(0.0);

	//The triplet ranking loss
	caffe_sub(dim_v, bottom[0]->cpu_data(), bottom[1]->cpu_data(), diff_sub_or_si.mutable_cpu_data()); // F-F+
	caffe_sub(dim_v, bottom[0]->cpu_data(), bottom[2]->cpu_data(), diff_sub_or_di.mutable_cpu_data()); // F-F-
	caffe_powx(dim_v, diff_sub_or_si.cpu_data(), Dtype(2.0), diff_pow_or_si.mutable_cpu_data());		  //Pow
	caffe_powx(dim_v, diff_sub_or_di.cpu_data(), Dtype(2.0), diff_pow_or_di.mutable_cpu_data());       //Pow

	for (int n = 0; n < batch_; n++){
		sub_or_si = diff_pow_or_si.cpu_data() + diff_pow_or_si.offset(n);
		sub_or_di = diff_pow_or_di.cpu_data() + diff_pow_or_di.offset(n);
		Dtype result1 = 0;
		Dtype result2 = 0;
		result1 = caffe_cpu_asum(dim_, sub_or_si);
		result2 = caffe_cpu_asum(dim_, sub_or_di);
		Dtype loss(0.0);
		loss = std::max(margin + result1 - result2, Dtype(0));// compute the loss
		diff_.mutable_cpu_data()[n] = loss; // save the loss[i]
	}
	for (int k = 0; k < batch_; k++){

		dist_sq_.mutable_cpu_data()[k] = diff_.cpu_data()[k];// save the loss[i] for BP
		Tripletlosstotal += dist_sq_.cpu_data()[k];
	}
	Tripletlosstotal = Tripletlosstotal / static_cast<Dtype>(bottom[0]->num()); //get the average loss
	top[0]->mutable_cpu_data()[0] = Tripletlosstotal;
}
Code example #12
void SimonLogic::newUserInput(int input)
{
    //If the user pressed the right button, advance to the next step
    if(input==pattern[playerInputCount])
    {
        playerInputCount++;
        emit updateProgress((int)(playerInputCount*100/pattern.size()));
    }
    else
    {
        emit updateScore(0);
        //Else, emit the signal to indicate losing
        if(score>highScore)
            highScore=score;
        emit loss(highScore);
    }
    //Starting new round
    if(playerInputCount==pattern.size()){
        playerInputCount=0;
        score+=pattern.size()*100;
        //The flashing frequency gets faster in every new round
        if(frequency>300)
            frequency-=100;
        emit updateScore(score);

        addToPattern();
        emit newPattern(pattern,frequency);
    }

}
Code example #13
	Dtype TripletClipHingeLossLayer<Dtype>::compute_tripletloss(int batchsize,
		int Dimv){
		Dtype Tripletlosstotal(0.0);
		const Dtype* sub_or_si;
		const Dtype* sub_or_di;
		//The triplet ranking loss
		caffe_sub(Dimv, ave_or.cpu_data(), ave_si.cpu_data(), diff_sub_or_si.mutable_cpu_data()); // F-F+
		caffe_sub(Dimv, ave_or.cpu_data(), ave_di.cpu_data(), diff_sub_or_di.mutable_cpu_data()); // F-F-
		caffe_powx(Dimv, diff_sub_or_si.cpu_data(), Dtype(2.0), diff_pow_or_si.mutable_cpu_data());		  //Pow
		caffe_powx(Dimv, diff_sub_or_di.cpu_data(), Dtype(2.0), diff_pow_or_di.mutable_cpu_data());       //Pow
		for (int n = 0; n < batchsize; n++)
		{
			sub_or_si = diff_pow_or_si.cpu_data() + diff_pow_or_si.offset(n);
			sub_or_di = diff_pow_or_di.cpu_data() + diff_pow_or_di.offset(n);
			Dtype result1 = 0;
			Dtype result2 = 0;
			result1 = caffe_cpu_asum(dim, sub_or_si);
			result2 = caffe_cpu_asum(dim, sub_or_di);
			Dtype loss(0.0);
			loss = std::max(margin + result1 - result2, Dtype(FLT_MIN));// compute the loss
			diff_.mutable_cpu_data()[n] = loss; // save the loss[i]
		}
		for (int k = 0; k < batchsize; k++)
		{

			dist_sq_.mutable_cpu_data()[k] = diff_.cpu_data()[k];// save the loss[i] for BP
			Tripletlosstotal += dist_sq_.cpu_data()[k];
		}
		return Tripletlosstotal / static_cast<Dtype>(batchsize);
	}
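A design note: unlike example #11, which clips the hinge at Dtype(0), this variant clips at Dtype(FLT_MIN) (the smallest positive normalized float, from <cfloat>), so even a satisfied triplet records a vanishingly small positive loss; presumably this avoids exact zeros in the per-clip losses saved for the backward pass.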
Code example #14
double current_obj_val(EXAMPLE *ex, SVECTOR **fycache, long m, STRUCTMODEL *sm, STRUCT_LEARN_PARM *sparm, double C, int *valid_examples) {

  long i, j;
  SVECTOR *f, *fy, *fybar, *lhs;
  LABEL       ybar;
  double lossval, margin;
  double *new_constraint;
	double obj = 0.0;

  /* find cutting plane */
  lhs = NULL;
  margin = 0;
  for (i=0;i<m;i++) {
		if(!valid_examples[i])
			continue;
    find_most_violated_constraint_marginrescaling(ex[i].x, ex[i].y, &ybar, sm, sparm);
    /* get difference vector */
    fy = copy_svector(fycache[i]);
    fybar = psi(ex[i].x,ybar,sm,sparm);
    lossval = loss(ex[i].y,ybar,sparm);

    /* scale difference vector */
    for (f=fy;f;f=f->next) {
      //f->factor*=1.0/m;
      f->factor*=ex[i].x.example_cost/m;
    }
    for (f=fybar;f;f=f->next) {
      //f->factor*=-1.0/m;
      f->factor*=-ex[i].x.example_cost/m;
    }
    /* add ybar to constraint */
    append_svector_list(fy,lhs);
    append_svector_list(fybar,fy);
    lhs = fybar;
    //margin+=lossval/m;
		margin += lossval*ex[i].x.example_cost/m;
  }

  /* compact the linear representation */
  new_constraint = add_list_nn(lhs, sm->sizePsi);
  free_svector(lhs);

	obj = margin;
	for(i = 1; i < sm->sizePsi+1; i++)
		obj -= new_constraint[i]*sm->w[i];
	if(obj < 0.0)
		obj = 0.0;
	obj *= C;
	for(i = 1; i < sm->sizePsi+1; i++)
		obj += 0.5*sm->w[i]*sm->w[i];
  free(new_constraint);

	return obj;
}
Code example #15
File: nn_rprop.cpp Project: linxihui/deepLearning
VectorXd feedForwardNetwork::rpropTrain(MatrixXd x, MatrixXd y, int numepochs, int batchsize, double incScale, double decScale, double incScaleMax, double decScaleMin, bool verbose) {
	// initialize training parameters
	std::vector<MatrixXd> dW(layer_size.size()-1);
	std::vector<VectorXd> dB(layer_size.size()-1);
	std::vector<ArrayXXi> signDeltaW(layer_size.size()-1);
	std::vector<ArrayXi> signDeltaB(layer_size.size()-1);

	for(int i = 0; i < layer_size.size()-1; i++) {
		dW[i].setConstant(layer_size[i+1], layer_size[i], 0.1);
		dB[i].setConstant(layer_size[i+1], 0.1);
		signDeltaW[i].setZero(layer_size[i+1], layer_size[i]);
		signDeltaB[i].setZero(layer_size[i+1]);
		}

	long n_sample = x.cols();
	if (batchsize > n_sample) batchsize = n_sample;
	int n_batch = n_sample / batchsize; // integer division: truncated if not evenly divisible
	int remainder = n_sample - n_batch*batchsize;

	int n_batch2 = n_batch; // n_batch2 is the actual batch number
	if (remainder > 0) n_batch2++;

	int s = 0;  // update iteration, total iteration = numepoch x numbatch
	VectorXd loss(numepochs*n_batch2);  // mean sum of square error/loss
	MatrixXd error;  //raw error: per sample per output dimension
	error.setConstant(numepochs, n_batch2, -1);
	PermutationMatrix<Dynamic, Dynamic> perm(n_sample);

	MatrixXd x_perm(x);
	MatrixXd y_perm(y);

	for (int i = 0; i < numepochs; i++) {
		if (verbose) cout <<  "Epoch " << i + 1 << endl;
		perm.setIdentity();
		random_shuffle(perm.indices().data(), perm.indices().data() + perm.indices().size());
		x_perm = x_perm * perm;  // col = sample, shuffle samples
		y_perm = y_perm * perm;
		int this_batchsize = batchsize;

		for(int j = 0; j < n_sample; j +=batchsize) {
			if (j >= n_sample - remainder) this_batchsize = remainder;
			error = ff(x_perm.middleCols(j, this_batchsize), y_perm.middleCols(j,  this_batchsize));
			rprop(error, dW, dB, signDeltaW, signDeltaB, incScale, decScale, incScaleMax, decScaleMin);
			if (output == "softmax") {
				loss[s] = -(y_perm.middleCols(j, this_batchsize).array() * post[layer_size.size()-1].array().log()).colwise().sum().mean();
			} else {
				loss[s] = error.array().square().mean();
				}
			s++;
			}
		}
	return loss;
	}
Code example #16
int main(int argc, char* argv[]) {
  double avgloss,l;
  long i, correct;

  char testfile[1024];
  char modelfile[1024];

  STRUCTMODEL model;
  STRUCT_LEARN_PARM sparm;
  LEARN_PARM lparm;
  KERNEL_PARM kparm;

  SAMPLE testsample;
  LABEL y;
  LATENT_VAR h; 

  /* read input parameters */
  read_input_parameters(argc,argv,testfile,modelfile,&sparm);

  /* read model file */
  printf("Reading model..."); fflush(stdout);
//  model = read_struct_model(modelfile, &sparm);
  printf("done.\n"); 

  /* read test examples */
  printf("Reading test examples..."); fflush(stdout);
  testsample = read_struct_examples(testfile,&sparm);
  printf("done.\n");

  init_struct_model(testsample,&model,&sparm,&lparm,&kparm);
  
  avgloss = 0.0;
  correct = 0;
  for (i=0;i<testsample.n;i++) {
    classify_struct_example(testsample.examples[i].x,&y,&h,&model,&sparm);
    l = loss(testsample.examples[i].y,y,h,&sparm);
    avgloss += l;
    if (l==0) correct++;

    free_label(y);
    free_latent_var(h); 
  }

  printf("Average loss on test set: %.4f\n", avgloss/testsample.n);
  printf("Zero/one error on test set: %.4f\n", 1.0 - ((float) correct)/testsample.n);

  free_struct_sample(testsample);
  free_struct_model(model,&sparm);

  return(0);

}
Code example #17
File: 508A.cpp Project: Oliverlyn/ACM
int main(){
    int n, m, k, i, j, x, y;
    while (scanf("%d%d%d", &n, &m, &k) != EOF) {
        memset(map, 0, sizeof(map));
        for (i = 0, j = 0; i < k; i++) {
            scanf("%d%d", &x, &y);
            map[x][y] = 1;
            if (!j && loss(x - 1, y - 1))  // record the first move that loses (presumably: completes a 2x2 block)
                j = i + 1;
        }
        printf("%d\n", j);
    }
}
Code example #18
File: svm_struct_learn.cpp Project: aa755/cfg3d
void find_most_violated_constraint(SVECTOR **fydelta, double *rhs, 
				   EXAMPLE *ex, SVECTOR *fycached, long n, 
				   STRUCTMODEL *sm, STRUCT_LEARN_PARM *sparm,
				   double *rt_viol, double *rt_psi, 
				   long *argmax_count)
     /* returns fydelta=fy-fybar and rhs scalar value that correspond
	to the most violated constraint for example ex */
{
  double      rt2=0;
  LABEL       ybar;
  SVECTOR     *fybar, *fy;
  double      factor,lossval;

  if(struct_verbosity>=2) rt2=get_runtime();
  (*argmax_count)++;
  if(sparm->loss_type == SLACK_RESCALING) 
    ybar=find_most_violated_constraint_slackrescaling(ex->x,ex->y,sm,sparm);
  else
    ybar=find_most_violated_constraint_marginrescaling(ex->x,ex->y,sm,sparm);
  if(struct_verbosity>=2) (*rt_viol)+=MAX(get_runtime()-rt2,0);
  
  if(empty_label(ybar)) {
    printf("ERROR: empty label was returned for example\n");
    /* exit(1); */
    /* continue; */
  }
  
  /**** get psi(x,y) and psi(x,ybar) ****/
  if(struct_verbosity>=2) rt2=get_runtime();
  if(fycached)
    fy=copy_svector(fycached); 
  else 
    fy=psi(ex->x,ex->y,sm,sparm);
  fybar=psi(ex->x,ybar,sm,sparm);
  if(struct_verbosity>=2) (*rt_psi)+=MAX(get_runtime()-rt2,0);
  lossval=loss(ex->y,ybar,sparm);
  free_label(ybar);
  
  /**** scale feature vector and margin by loss ****/
  if(sparm->loss_type == SLACK_RESCALING)
    factor=lossval/n;
  else                 /* do not rescale vector for */
    factor=1.0/n;      /* margin rescaling loss type */
  mult_svector_list(fy,factor);
  mult_svector_list(fybar,-factor);
  append_svector_list(fybar,fy);   /* compute fy-fybar */

  (*fydelta)=fybar;
  (*rhs)=lossval/n;
}
Code example #19
File: svm_struct_api.cpp Project: JackZZhang/iPM3F
LABEL       find_most_violated_constraint_slackrescaling(PATTERNX x, LABEL y, 
						     STRUCTMODEL *sm, 
						     STRUCT_LEARN_PARM *sparm)
{
  /* Finds the label ybar for pattern x that is responsible for
     the most violated constraint for the slack rescaling
     formulation. It has to take into account the scoring function in
     sm, especially the weights sm.w, as well as the loss
     function. The weights in sm.w correspond to the features defined
     by psi() and range from index 1 to index sm->sizePsi. Most simple
     is the case of the zero/one loss function. For the zero/one loss,
     this function should return the highest scoring label ybar, if
     ybar is unequal y; if it is equal to the correct label y, then
     the function shall return the second highest scoring label. If
     the function cannot find a label, it shall return an empty label
     as recognized by the function empty_label(y). */
  LABEL ybar;
  DOC doc;
  long classlabel, bestclass=-1, first=1;
  double score, score_y, score_ybar, bestscore=-1;

  /* NOTE: This function could be made much more efficient by not
     always computing a new PSI vector. */
  doc = *(x.doc);
  doc.fvec = psi(x,y,sm,sparm);
  score_y = classify_example(sm->svm_model,&doc);
  free_svector(doc.fvec);

  ybar.scores = NULL;
  ybar.num_classes = sparm->num_classes;
  for(classlabel=1; classlabel<=sparm->num_classes; classlabel++) {
    ybar.classlabel = classlabel;
    doc.fvec=psi(x,ybar,sm,sparm);
    score_ybar=classify_example(sm->svm_model,&doc);
    free_svector(doc.fvec);
    score=loss(y,ybar,sparm,x.doc->fvec)*(1.0-score_y+score_ybar);
    if((bestscore<score)  || (first)) {
      bestscore=score;
      bestclass = classlabel;
      first=0;
    }
  }
  if(bestclass == -1) 
    printf("ERROR: Only one class\n");
  ybar.classlabel = bestclass;
  if(struct_verbosity>=3)
    printf("[%ld:%.2f] ",bestclass,bestscore);
  return(ybar);
}
Code example #20
double compute_current_loss(SAMPLE val, STRUCTMODEL *sm, STRUCT_LEARN_PARM *sparm)
{
	long i;
	LABEL y;
	double cur_loss = 0.0;
	double store;
	for(i = 0; i < val.n; i++)
	{
		classify_struct_example(val.examples[i].x,&y,sm,sparm);
		store = loss(val.examples[i].y,y,sparm);
		cur_loss += store;
	}

	cur_loss /= (double) val.n;
	return cur_loss;
}
Code example #21
void SigmoidCrossEntropyLossLayer<Dtype,Mtype>::Forward_cpu(
    const vector<Blob<Dtype,Mtype>*>& bottom, const vector<Blob<Dtype,Mtype>*>& top) {
  // The forward pass computes the sigmoid outputs.
  sigmoid_bottom_vec_[0] = bottom[0];
  sigmoid_layer_->Forward(sigmoid_bottom_vec_, sigmoid_top_vec_);
  // Compute the loss (negative log likelihood)
  const int count = bottom[0]->count();
  const int num = bottom[0]->num();
  // Stable version of loss computation from input data
  const Dtype* input_data = bottom[0]->cpu_data();
  const Dtype* target = bottom[1]->cpu_data();
  Mtype loss(0.f);
  for (int i = 0; i < count; ++i) {
    Mtype input_val = Get<Mtype>(input_data[i]);
    loss -= input_val * (Get<Mtype>(target[i]) - (input_val >= 0)) -
        log(1 + exp(input_val - 2 * input_val * (input_val >= 0)));
  }
  top[0]->mutable_cpu_data()[0] = Get<Dtype>(loss / num);
}
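The loop above uses the standard numerically stable rewrite of the sigmoid cross-entropy: since x - 2*x*[x>=0] equals -|x|, the exp never overflows. A minimal standalone version of the per-element term (plain C++; the names are illustrative):

#include <cmath>

// Stable per-element sigmoid cross-entropy for logit x and target y in [0,1],
// equal to -(y*log(sigmoid(x)) + (1-y)*log(1-sigmoid(x))),
// matching the loop body above term for term.
double sigmoid_xent(double x, double y) {
  double ind = (x >= 0.0) ? 1.0 : 0.0;
  return -(x * (y - ind) - std::log(1.0 + std::exp(x - 2.0 * x * ind)));
}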
Code example #22
void TripletLossLayer<Dtype>::Forward_cpu(  
    const vector<Blob<Dtype>*>& bottom,  
    const vector<Blob<Dtype>*>& top) {  
  int count = bottom[0]->count();  
  const Dtype* sampleW = bottom[3]->cpu_data();  
  caffe_sub(  
      count,  
      bottom[0]->cpu_data(),  // a  
      bottom[1]->cpu_data(),  // p  
      diff_ap_.mutable_cpu_data());  // a_i-p_i  
  caffe_sub(  
       count,  
       bottom[0]->cpu_data(),  // a  
       bottom[2]->cpu_data(),  // n  
       diff_an_.mutable_cpu_data());  // a_i-n_i  
  caffe_sub(  
       count,  
       bottom[1]->cpu_data(),  // p  
       bottom[2]->cpu_data(),  // n  
       diff_pn_.mutable_cpu_data());  // p_i-n_i  
  const int channels = bottom[0]->channels();  
  Dtype margin = this->layer_param_.triplet_loss_param().margin();  
  
  Dtype loss(0.0);  
  for (int i = 0; i < bottom[0]->num(); ++i) {  
    dist_sq_ap_.mutable_cpu_data()[i] = caffe_cpu_dot(channels,  
        diff_ap_.cpu_data() + (i*channels), diff_ap_.cpu_data() + (i*channels));  
    dist_sq_an_.mutable_cpu_data()[i] = caffe_cpu_dot(channels,  
        diff_an_.cpu_data() + (i*channels), diff_an_.cpu_data() + (i*channels));  
    Dtype mdist = sampleW[i]*std::max(margin + dist_sq_ap_.cpu_data()[i] - dist_sq_an_.cpu_data()[i], Dtype(0.0));  
    loss += mdist;  
    if(mdist==Dtype(0)){  
        //dist_binary_.mutable_cpu_data()[i] = Dtype(0);  
        //prepare for backward pass  
        caffe_set(channels, Dtype(0), diff_ap_.mutable_cpu_data() + (i*channels));  
        caffe_set(channels, Dtype(0), diff_an_.mutable_cpu_data() + (i*channels));  
        caffe_set(channels, Dtype(0), diff_pn_.mutable_cpu_data() + (i*channels));  
    }  
  }  
  loss = loss / static_cast<Dtype>(bottom[0]->num()) / Dtype(2);  
  top[0]->mutable_cpu_data()[0] = loss;  
}  
Code example #23
File: medstc.cpp Project: aykutfirat/MedSTC-R
// find the loss-augmented prediction for one document.
void MedSTC::loss_aug_predict(Document *doc, double *theta)
{
	doc->lossAugLabel = -1;
	double dMaxScore = 0;
	int etaIx = 0;
	for ( int y=0; y<m_nLabelNum; y++ ) {
		double dScore = 0;
		for ( int k=0; k<m_nK; k++ ) {
			dScore += theta[k] * m_dEta[etaIx];
			etaIx ++;
		}
		dScore -= m_dB;
		dScore += loss(y, doc->gndlabel);

		if ( doc->lossAugLabel == -1 || dScore > dMaxScore ) {
			doc->lossAugLabel = y;
			dMaxScore = dScore;
		}
	}
}
Code example #24
File: SlamDriver.cpp Project: meiroo/dtslam
void SlamDriver::resyncTracker()
{
	//This function alternates between using 2D features and not, resyncing the tracker after each switch, so that the effect of the 2D features can be clearly observed.

	shared_lock_guard<shared_mutex> lock(mSlam.getMap().getMutex());

	FLAGS_PoseUse2D = !FLAGS_PoseUse2D;

	DTSLAM_LOG << "\nResyncing, PoseUse2D=" << FLAGS_PoseUse2D << "\n";

	mSlam.getTracker().resync();

	mActiveWindow->updateState();

	DTSLAM_LOG << "\nResyncing, PoseUse2D=" << FLAGS_PoseUse2D << ", done\n";

	ceres::CauchyLoss loss(3);
	double robustError[3];

	auto &matches = mSlam.getTracker().getMatches();
	auto &errors = mSlam.getTracker().getReprojectionErrors();

	float total3D = 0;
	float total2D = 0;
	for (int i = 0, end = matches.size(); i != end; ++i)
	{
		auto &match = matches[i];
		auto &err = errors[i];

		loss.Evaluate(err.bestReprojectionErrorSq, robustError);

		if (match.measurement.getFeature().is3D())
			total3D += (float)robustError[0];
		else
			total2D += (float)robustError[0];
	}

	DTSLAM_LOG << "3D total error=" << total3D << "\n";
	DTSLAM_LOG << "2D total error=" << total2D << "\n";
	DTSLAM_LOG << "Total error=" << total3D+total2D << "\n";
}
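A note on the robustifier: per the Ceres loss-function definitions, ceres::CauchyLoss(3) computes rho(s) = b * log(1 + s/b) with b = 3^2 = 9, and Evaluate fills robustError[0] with that value (the other two slots receive the first and second derivatives). Gross reprojection outliers therefore contribute only logarithmically to the 2D/3D totals logged above.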
Code example #25
CCACHE *create_constraint_cache(SAMPLE sample, STRUCT_LEARN_PARM *sparm)
     /* create new constraint cache for training set */
{
  long        n=sample.n;
  EXAMPLE     *ex=sample.examples;
  CCACHE      *ccache;
  int         i;

  ccache=(CCACHE *)malloc(sizeof(CCACHE));
  ccache->n=n;
  ccache->constlist=(CCACHEELEM **)malloc(sizeof(CCACHEELEM *)*n);
  for(i=0;i<n;i++) { 
    /* add constraint for ybar=y to cache */
    ccache->constlist[i]=(CCACHEELEM *)malloc(sizeof(CCACHEELEM));
    ccache->constlist[i]->fydelta=create_svector_n(NULL,0,"",1);
    ccache->constlist[i]->rhs=loss(ex[i].y,ex[i].y,sparm)/n;
    ccache->constlist[i]->viol=0;
    ccache->constlist[i]->next=NULL;
  }
  return(ccache);
}
Code example #26
	void TripletLossLayer<Dtype>::Forward_cpu(
		const vector<Blob<Dtype>*> & bottom, const vector<Blob<Dtype>*> & top){
		int count = bottom[0]->count();//count= n * c * h * w
//		const Dtype* sampleW = bottom[3]->cpu_data(); // 1
		caffe_sub(
			count, 
			bottom[0]->cpu_data(), // a
			bottom[1]->cpu_data(), //p
			diff_ap_.mutable_cpu_data()); // diff_ap_= a - p
		caffe_sub(
			count,
			bottom[0]->cpu_data(), //a
			bottom[2]->cpu_data(), //n
			diff_an_.mutable_cpu_data()); // diff_an_ = a - n
		caffe_sub(
			count, 
			bottom[1]->cpu_data(), //p
			bottom[2]->cpu_data(), //n
			diff_pn_.mutable_cpu_data() // diff_pn_ = p - n
			);
		const int channels = bottom[0]->channels();
		Dtype margin = this->layer_param_.triplet_loss_param().margin();// alpha
		Dtype loss(0.0); //record the  loss of this batch.
		for(int i = 0; i < bottom[0]->num(); ++i) {//for all triplet
			dist_sq_ap_.mutable_cpu_data()[i] = caffe_cpu_dot(
				channels, diff_ap_.cpu_data() + (i*channels), diff_ap_.cpu_data() + (i * channels));
			dist_sq_an_.mutable_cpu_data()[i] = caffe_cpu_dot(
				channels, diff_an_.cpu_data() + (i * channels), diff_an_.cpu_data() + (i * channels));
			//mdist= one triplet loss
			Dtype mdist =  std::max(margin + dist_sq_ap_.cpu_data()[i] - dist_sq_an_.cpu_data()[i], Dtype(0.0));
			loss += mdist;
			if(mdist == Dtype(0)){
				caffe_set(channels, Dtype(0), diff_ap_.mutable_cpu_data() + (i * channels));
				caffe_set(channels, Dtype(0), diff_an_.mutable_cpu_data() + (i * channels));
				caffe_set(channels, Dtype(0), diff_pn_.mutable_cpu_data() + (i * channels));
			}
		}
		loss = loss/static_cast<Dtype>(bottom[0]->num())/Dtype(2);
		top[0]->mutable_cpu_data()[0] = loss;
	}
Code example #27
File: svm_struct_learn.c Project: atreyee-m/nlp
void svm_learn_struct(SAMPLE sample, STRUCT_LEARN_PARM *sparm,
		      LEARN_PARM *lparm, KERNEL_PARM *kparm, 
		      STRUCTMODEL *sm)
{
  int         i,j;
  int         numIt=0;
  long        newconstraints=0, activenum=0; 
  int         opti_round, *opti;
  long        old_numConst=0;
  double      epsilon;
  long        tolerance;
  double      lossval,factor;
  double      margin=0;
  double      slack, *slacks, slacksum;
  long        sizePsi;
  double      *alpha=NULL;
  CONSTSET    cset;
  SVECTOR     *diff=NULL;
  SVECTOR     *fy, *fybar, *f;
  SVECTOR     *slackvec;
  WORD        slackv[2];
  MODEL       *svmModel=NULL;
  KERNEL_CACHE *kcache=NULL;
  LABEL       ybar;
  DOC         *doc;

  long        n=sample.n;
  EXAMPLE     *ex=sample.examples;
  double      rt_total=0.0, rt_opt=0.0;
  long        rt1,rt2;

  init_struct_model(sample,sm,sparm); 
  sizePsi=sm->sizePsi+1;          /* sm must contain size of psi on return */

  /* initialize example selection heuristic */ 
  opti=(int*)my_malloc(n*sizeof(int));
  for(i=0;i<n;i++) {
    opti[i]=0;
  }
  opti_round=0;

  if(sparm->slack_norm == 1) {
    lparm->svm_c=sparm->C;          /* set upper bound C */
    lparm->sharedslack=1;
  }
  else if(sparm->slack_norm == 2) {
    lparm->svm_c=999999999999999.0; /* upper bound C must never be reached */
    lparm->sharedslack=0;
    if(kparm->kernel_type != LINEAR) {
      printf("ERROR: Kernels are not implemented for L2 slack norm!"); 
      fflush(stdout);
      exit(0);
    }
  }
  else {
    printf("ERROR: Slack norm must be L1 or L2!"); fflush(stdout);
    exit(0);
  }


  epsilon=1.0;                    /* start with low precision and
				     increase later */
  tolerance=n/100;                /* increase precision, whenever less
                                     than that number of constraints
                                     is not fulfilled */
  lparm->biased_hyperplane=0;     /* set threshold to zero */

  cset=init_struct_constraints(sample, sm, sparm);
  if(cset.m > 0) {
    alpha=realloc(alpha,sizeof(double)*cset.m);
    for(i=0; i<cset.m; i++) 
      alpha[i]=0;
  }

  /* set initial model and slack variables*/
  svmModel=(MODEL *)my_malloc(sizeof(MODEL));
  svm_learn_optimization(cset.lhs,cset.rhs,cset.m,sizePsi+n,
			 lparm,kparm,NULL,svmModel,alpha);
  add_weight_vector_to_linear_model(svmModel);
  sm->svm_model=svmModel;
  sm->w=svmModel->lin_weights; /* short cut to weight vector */

  printf("Starting Iterations\n");

    /*****************/
   /*** main loop ***/
  /*****************/
  do { /* iteratively increase precision */

    epsilon=MAX(epsilon*0.09999999999,sparm->epsilon);
    if(epsilon == sparm->epsilon)   /* for final precision, find all SV */
      tolerance=0;
    lparm->epsilon_crit=epsilon/2;  /* svm precision must be higher than eps */
    if(struct_verbosity>=1)
      printf("Setting current working precision to %g.\n",epsilon);

    do { /* iteration until (approx) all SV are found for current
            precision and tolerance */
      
      old_numConst=cset.m;
      opti_round++;
      activenum=n;

      do { /* go through examples that keep producing new constraints */

	if(struct_verbosity>=1) { 
	  printf("--Iteration %i (%ld active): ",++numIt,activenum); 
	  fflush(stdout);
	}
	
	for(i=0; i<n; i++) { /*** example loop ***/
	  
	  rt1=get_runtime();
	    
	  if(opti[i] != opti_round) {/* if the example is not shrunk
	                                away, then see if it is necessary to 
					add a new constraint */
	    if(sparm->loss_type == SLACK_RESCALING) 
	      ybar=find_most_violated_constraint_slackrescaling(ex[i].x,
								ex[i].y,sm,
								sparm);
	    else
	      ybar=find_most_violated_constraint_marginrescaling(ex[i].x,
								 ex[i].y,sm,
								 sparm);
	    
	    if(empty_label(ybar)) {
	      if(opti[i] != opti_round) {
		activenum--;
		opti[i]=opti_round; 
	      }
	      if(struct_verbosity>=2)
		printf("no-incorrect-found(%i) ",i);
	      continue;
	    }
	  
	    /**** get psi(y)-psi(ybar) ****/
	    fy=psi(ex[i].x,ex[i].y,sm,sparm);
	    fybar=psi(ex[i].x,ybar,sm,sparm);
	    
	    /**** scale feature vector and margin by loss ****/
	    lossval=loss(ex[i].y,ybar,sparm);
	    if(sparm->slack_norm == 2)
	      lossval=sqrt(lossval);
	    if(sparm->loss_type == SLACK_RESCALING)
	      factor=lossval;
	    else               /* do not rescale vector for */
	      factor=1.0;      /* margin rescaling loss type */
	    for(f=fy;f;f=f->next)
	      f->factor*=factor;
	    for(f=fybar;f;f=f->next)
	      f->factor*=-factor;
	    margin=lossval;

	    /**** create constraint for current ybar ****/
	    append_svector_list(fy,fybar);/* append the two vector lists */
	    doc=create_example(cset.m,0,i+1,1,fy);

	    /**** compute slack for this example ****/
	    slack=0;
	    for(j=0;j<cset.m;j++) 
	      if(cset.lhs[j]->slackid == i+1) {
		if(sparm->slack_norm == 2) /* works only for linear kernel */
		  slack=MAX(slack,cset.rhs[j]
			          -(classify_example(svmModel,cset.lhs[j])
				    -sm->w[sizePsi+i]/(sqrt(2*sparm->C))));
		else
		  slack=MAX(slack,
			   cset.rhs[j]-classify_example(svmModel,cset.lhs[j]));
	      }
	    
	    /**** if `error' add constraint and recompute ****/
	    if((classify_example(svmModel,doc)+slack)<(margin-epsilon)) { 
	      if(struct_verbosity>=2)
		{printf("(%i) ",i); fflush(stdout);}
	      if(struct_verbosity==1)
		{printf("."); fflush(stdout);}
	      
	      /**** resize constraint matrix and add new constraint ****/
	      cset.m++;
	      cset.lhs=realloc(cset.lhs,sizeof(DOC *)*cset.m);
	      if(kparm->kernel_type == LINEAR) {
		diff=add_list_ss(fy); /* store difference vector directly */
		if(sparm->slack_norm == 1) 
		  cset.lhs[cset.m-1]=create_example(cset.m-1,0,i+1,1,
						    copy_svector(diff));
		else if(sparm->slack_norm == 2) {
		  /**** add squared slack variable to feature vector ****/
		  slackv[0].wnum=sizePsi+i;
		  slackv[0].weight=1/(sqrt(2*sparm->C));
		  slackv[1].wnum=0; /*terminator*/
		  slackvec=create_svector(slackv,"",1.0);
		  cset.lhs[cset.m-1]=create_example(cset.m-1,0,i+1,1,
						    add_ss(diff,slackvec));
		  free_svector(slackvec);
		}
		free_svector(diff);
	      }
	      else { /* kernel is used */
		if(sparm->slack_norm == 1) 
		  cset.lhs[cset.m-1]=create_example(cset.m-1,0,i+1,1,
						    copy_svector(fy));
		else if(sparm->slack_norm == 2)
		  exit(1);
	      }
	      cset.rhs=realloc(cset.rhs,sizeof(double)*cset.m);
	      cset.rhs[cset.m-1]=margin;
	      alpha=realloc(alpha,sizeof(double)*cset.m);
	      alpha[cset.m-1]=0;
	      newconstraints++;
	    }
	    else {
	      printf("+"); fflush(stdout); 
	      if(opti[i] != opti_round) {
		activenum--;
		opti[i]=opti_round; 
	      }
	    }

	    free_example(doc,0);
	    free_svector(fy); /* this also free's fybar */
	    free_label(ybar);
	  }

	  /**** get new QP solution ****/
	  if((newconstraints >= sparm->newconstretrain) 
	     || ((newconstraints > 0) && (i == n-1))) {
	    if(struct_verbosity>=1) {
	      printf("*");fflush(stdout);
	    }
	    rt2=get_runtime();
	    free_model(svmModel,0);
	    svmModel=(MODEL *)my_malloc(sizeof(MODEL));
	    /* Always get a new kernel cache. It is not possible to use the
	       same cache for two different training runs */
	    if(kparm->kernel_type != LINEAR)
	      kcache=kernel_cache_init(cset.m,lparm->kernel_cache_size);
	    /* Run the QP solver on cset. */
	    svm_learn_optimization(cset.lhs,cset.rhs,cset.m,sizePsi+n,
				   lparm,kparm,kcache,svmModel,alpha);
	    if(kcache)
	      kernel_cache_cleanup(kcache);
	    /* Always add weight vector, in case part of the kernel is
	       linear. If not, ignore the weight vector since its
	       content is bogus. */
	    add_weight_vector_to_linear_model(svmModel);
	    sm->svm_model=svmModel;
	    sm->w=svmModel->lin_weights; /* short cut to weight vector */
	    rt_opt+=MAX(get_runtime()-rt2,0);
	    
	    newconstraints=0;
	  }	

	  rt_total+=MAX(get_runtime()-rt1,0);
	} /* end of example loop */

	if(struct_verbosity>=1)
	  printf("(NumConst=%d, SV=%ld, Eps=%.4f)\n",cset.m,svmModel->sv_num-1,
		 svmModel->maxdiff);

      } while(activenum > 0);   /* repeat until all examples produced no
				   constraint at least once */

    } while((cset.m - old_numConst) > tolerance) ;

  } while(epsilon > sparm->epsilon);  

  if(struct_verbosity>=1) {
    /**** compute sum of slacks ****/
    slacks=(double *)my_malloc(sizeof(double)*(n+1));
    for(i=0; i<=n; i++) { 
      slacks[i]=0;
    }
    if(sparm->slack_norm == 1) {
      for(j=0;j<cset.m;j++) 
	slacks[cset.lhs[j]->slackid]=MAX(slacks[cset.lhs[j]->slackid],
			   cset.rhs[j]-classify_example(svmModel,cset.lhs[j]));
      }
    else if(sparm->slack_norm == 2) {
      for(j=0;j<cset.m;j++) 
	slacks[cset.lhs[j]->slackid]=MAX(slacks[cset.lhs[j]->slackid],
		cset.rhs[j]
	         -(classify_example(svmModel,cset.lhs[j])
		   -sm->w[sizePsi+cset.lhs[j]->slackid-1]/(sqrt(2*sparm->C))));
    }
    slacksum=0;
    for(i=0; i<=n; i++)  
      slacksum+=slacks[i];
    free(slacks);

    printf("Final epsilon on KKT-Conditions: %.5f\n",
	   MAX(svmModel->maxdiff,epsilon));
    printf("Total number of constraints added: %i\n",(int)cset.m);
    if(sparm->slack_norm == 1) {
      printf("Number of SV: %ld \n",svmModel->sv_num-1);
      printf("Number of non-zero slack variables: %ld (out of %ld)\n",
	     svmModel->at_upper_bound,n);
      printf("Norm of weight vector: |w|=%.5f\n",
	     model_length_s(svmModel,kparm));
    }
    else if(sparm->slack_norm == 2){ 
      printf("Number of SV: %ld (including %ld at upper bound)\n",
	     svmModel->sv_num-1,svmModel->at_upper_bound);
      printf("Norm of weight vector (including L2-loss): |w|=%.5f\n",
	     model_length_s(svmModel,kparm));
    }
    printf("Sum of slack variables: sum(xi_i)=%.5f\n",slacksum);
    printf("Norm of longest difference vector: ||Psi(x,y)-Psi(x,ybar)||=%.5f\n",
	   length_of_longest_document_vector(cset.lhs,cset.m,kparm));
    printf("Runtime in cpu-seconds: %.2f (%.2f%% for SVM optimization)\n",
	   rt_total/100.0, 100.0*rt_opt/rt_total);
  }
  if(struct_verbosity>=4)
    printW(sm->w,sizePsi,n,lparm->svm_c);

  if(svmModel) {
    sm->svm_model=copy_model(svmModel);
    sm->w=sm->svm_model->lin_weights; /* short cut to weight vector */
  }

  print_struct_learning_stats(sample,sm,cset,alpha,sparm);

  if(svmModel)
    free_model(svmModel,0);
  free(alpha); 
  free(opti); 
  free(cset.rhs); 
  for(i=0;i<cset.m;i++) 
    free_example(cset.lhs[i],1);
  free(cset.lhs);
}
Code example #28
SVECTOR* find_cutting_plane(EXAMPLE *ex, SVECTOR **fycache, double *margin, long m, STRUCTMODEL *sm,
		STRUCT_LEARN_PARM *sparm, char* tmpdir, char *trainfile, double frac_sim, double Fweight,
		char *dataset_stats_file, double rho_admm, long isExhaustive, long isLPrelaxation,
		double *margin2, int datasetStartIdx, int chunkSz, int eid, int chunkid) {

  long i;
  SVECTOR *f, *fy, *fybar, *lhs;
  LABEL       ybar;
  LATENT_VAR hbar;
  double lossval;
  double *new_constraint;

  long l,k;
  SVECTOR *fvec;
  WORD *words;  

  LABEL       *ybar_all = (LABEL*) malloc(sizeof(LABEL) * m);
  LATENT_VAR *hbar_all = (LATENT_VAR*) malloc (sizeof(LATENT_VAR) * m);
  time_t mv_start, mv_end;

  time(&mv_start);
  find_most_violated_constraint_marginrescaling_all_online(ybar_all, hbar_all, sm, sparm, m,
		  tmpdir, trainfile, frac_sim, dataset_stats_file, rho_admm, isExhaustive, isLPrelaxation,
		  Fweight, datasetStartIdx, chunkSz, eid, chunkid);
  time(&mv_end);

#if (DEBUG_LEVEL==1)
  print_time(mv_start, mv_end, "Max violators");
#endif


  /* find cutting plane */
  lhs = NULL;
  lossval = lossF1(ex, m, ybar_all, sparm, Fweight);
  *margin = lossval;

  *margin2 = 0;
  for (i=0;i<m;i++) {
    //find_most_violated_constraint_marginrescaling(ex[i].x, ex[i].y, &ybar, &hbar, sm, sparm);
    ybar = ybar_all[i];
    hbar = hbar_all[i];
    /* get difference vector */
    fy = copy_svector(fycache[i]);
    fybar = psi(ex[i].x,ybar,hbar,sm,sparm);
    lossval = loss(ex[i].y,ybar,hbar,sparm);
    free_label(ybar);
    free_latent_var(hbar);

    /* scale difference vector */
    for (f=fy;f;f=f->next) {
      f->factor*=1.0/m;
      //f->factor*=ex[i].x.example_cost/m;
    }

    for (f=fybar;f;f=f->next) {
      f->factor*=-1.0/m;
      //f->factor*=-ex[i].x.example_cost/m;
    }
    /* add ybar to constraint */
    append_svector_list(fy,lhs);
    append_svector_list(fybar,fy);
    lhs = fybar;
    *margin2+=lossval/m;
    //*margin+=lossval*ex[i].x.example_cost/m;
  }

  free(ybar_all);
  free(hbar_all);

  /* compact the linear representation */
  new_constraint = add_list_nn(lhs, sm->sizePsi);

//  printf("After this segfault ? \n");fflush(stdout);
//  printf("%x\n",new_constraint);

  free_svector(lhs);

  l=0;
  for (i=1;i<sm->sizePsi+1;i++) {
    if (fabs(new_constraint[i])>1E-10) l++; // non-zero
  }
  words = (WORD*)my_malloc(sizeof(WORD)*(l+1)); 
  assert(words!=NULL);
  k=0;
  for (i=1;i<sm->sizePsi+1;i++) {
    if (fabs(new_constraint[i])>1E-10) {
      words[k].wnum = i;
      words[k].weight = new_constraint[i]; 
      k++;
    }
  }
  words[k].wnum = 0;
  words[k].weight = 0.0;
  fvec = create_svector(words,"",1);

  free(words);
  free(new_constraint);

  return(fvec); 

}
Code example #29
File: ResidueDB.cpp Project: grosenberger/OpenMS
  Residue* ResidueDB::parseResidue_(Map<String, String>& values)
  {
    vector<EmpiricalFormula> low_mass_ions;
    Residue* res_ptr = new Residue();

    for (Map<String, String>::iterator it = values.begin(); it != values.end(); ++it)
    {
      String key(it->first);
      String value(it->second);

      if (key.hasSuffix(":Name"))
      {
        res_ptr->setName(value);
        continue;
      }
      if (key.hasSuffix(":ShortName"))
      {
        res_ptr->setShortName(value);
        continue;
      }
      if (key.hasSuffix(":ThreeLetterCode"))
      {
        res_ptr->setThreeLetterCode(value);
        continue;
      }
      if (key.hasSuffix(":OneLetterCode"))
      {
        res_ptr->setOneLetterCode(value);
        continue;
      }
      if (key.hasSuffix(":Formula"))
      {
        EmpiricalFormula formula(value);
        res_ptr->setFormula(EmpiricalFormula(value));
        res_ptr->setAverageWeight(formula.getAverageWeight());
        res_ptr->setMonoWeight(formula.getMonoWeight());
        continue;
      }

      if (key.hasSubstring(":Losses:LossName"))
      {
        res_ptr->addLossName(value);
        continue;
      }
      if (key.hasSubstring(":Losses:LossFormula"))
      {
        EmpiricalFormula loss(value);
        res_ptr->addLossFormula(loss);
        continue;
      }

      if (key.hasSubstring("NTermLosses:LossName"))
      {
        res_ptr->addNTermLossName(value);
        continue;
      }

      if (key.hasSubstring("NTermLosses:LossFormula"))
      {
        EmpiricalFormula loss(value);
        res_ptr->addNTermLossFormula(loss);
        continue;
      }

      if (key.hasSubstring("LowMassIons"))
      {
        // no markers defined?
        if (!key.hasSuffix(":"))
        {
          low_mass_ions.push_back(EmpiricalFormula(value));
        }
        continue;
      }
      if (key.hasSubstring("Synonyms"))
      {
        // no synonyms defined?
        if (!key.hasSuffix(":"))
        {
          res_ptr->addSynonym(value);
        }
        continue;
      }
      if (key.hasSubstring("pka"))
      {
        // no pka defined?
        if (!key.hasSuffix(":"))
        {
          res_ptr->setPka(value.toDouble());
        }
        continue;
      }
      if (key.hasSubstring("pkb"))
      {
        // no pkb defined?
        if (!key.hasSuffix(":"))
        {
          res_ptr->setPkb(value.toDouble());
        }
        continue;
      }
      if (key.hasSubstring("pkc"))
      {
        // no pkc defined?
        if (!key.hasSuffix(":"))
        {
          res_ptr->setPkc(value.toDouble());
        }
        continue;
      }
      if (key.hasSubstring("GB_SC"))
      {
        res_ptr->setSideChainBasicity(value.toDouble());
        continue;
      }
      if (key.hasSubstring("GB_BB_L"))
      {
        res_ptr->setBackboneBasicityLeft(value.toDouble());
        continue;
      }
      if (key.hasSubstring("GB_BB_R"))
      {
        res_ptr->setBackboneBasicityRight(value.toDouble());
        continue;
      }
      if (key.hasSubstring("ResidueSets"))
      {
        StringList residue_sets = ListUtils::create<String>(value);
        for (StringList::const_iterator local_it = residue_sets.begin(); local_it != residue_sets.end(); ++local_it)
        {
          res_ptr->addResidueSet(*local_it);
          residue_sets_.insert(*local_it);
        }
        continue;
      }
      cerr << "unknown key: " << key << ", with value: " << value << endl;
    }

    if (!low_mass_ions.empty())
    {
      res_ptr->setLowMassIons(low_mass_ions);
    }

    for (set<String>::const_iterator it = res_ptr->getResidueSets().begin(); it != res_ptr->getResidueSets().end(); ++it)
    {
      residues_by_set_[*it].insert(res_ptr);
    }

    return res_ptr;
  }
Code example #30
int main (int argc, char* argv[])
{
  long correct=0,incorrect=0,no_accuracy=0;
  long i;
  double t1,runtime=0;
  double avgloss=0,l;
  FILE *predfl;
  STRUCTMODEL model; 
  STRUCT_LEARN_PARM sparm;
  STRUCT_TEST_STATS teststats;
  SAMPLE testsample;
  LABEL y;

  svm_struct_classify_api_init(argc,argv);

  read_input_parameters(argc,argv,testfile,modelfile,predictionsfile,&sparm,
			&verbosity,&struct_verbosity);

  if(struct_verbosity>=1) {
    printf("Reading model..."); fflush(stdout);
  }
  model=read_struct_model(modelfile,&sparm);
  if(struct_verbosity>=1) {
    fprintf(stdout, "done.\n");
  }

  if(model.svm_model->kernel_parm.kernel_type == LINEAR) { /* linear kernel */
    /* compute weight vector */
    //add_weight_vector_to_linear_model(model.svm_model);
    //model.w=model.svm_model->lin_weights;
  }
  
  if(struct_verbosity>=1) {
    printf("Reading test examples..."); fflush(stdout);
  }
  testsample=read_struct_examples(testfile,&sparm);
  if(struct_verbosity>=1) {
    printf("done.\n"); fflush(stdout);
  }

  if(struct_verbosity>=1) {
    printf("Classifying test examples..."); fflush(stdout);
  }

  if ((predfl = fopen (predictionsfile, "w")) == NULL)
  { perror (predictionsfile); exit (1); }

  for(i=0;i<testsample.n;i++) {
    t1=get_runtime();
    y=classify_struct_example(testsample.examples[i].x,&model,&sparm);
    runtime+=(get_runtime()-t1);

    write_label(predfl,y);
    l=loss(testsample.examples[i].y,y,&sparm);
    avgloss+=l;
    if(l == 0) 
      correct++;
    else
      incorrect++;
    eval_prediction(i,testsample.examples[i],y,&model,&sparm,&teststats);

    if(empty_label(testsample.examples[i].y)) 
      { no_accuracy=1; } /* test data is not labeled */
    if(struct_verbosity>=2) {
      if((i+1) % 100 == 0) {
	printf("%ld..",i+1); fflush(stdout);
      }
    }
    free_label(y);
  }  
  avgloss/=testsample.n;
  fclose(predfl);

  if(struct_verbosity>=1) {
    printf("done\n");
    printf("Runtime (without IO) in cpu-seconds: %.2f\n",
	   (float)(runtime/100.0));    
  }
  if((!no_accuracy) && (struct_verbosity>=1)) {
    printf("Average loss on test set: %.4f\n",(float)avgloss);
    printf("Zero/one-error on test set: %.2f%% (%ld correct, %ld incorrect, %d total)\n",(float)100.0*incorrect/testsample.n,correct,incorrect,testsample.n);
  }
  print_struct_testing_stats(testsample,&model,&sparm,&teststats);
  free_struct_sample(testsample);
  free_struct_model(model);

  svm_struct_classify_api_exit();

  return(0);
}