Example No. 1
void STLossLayer<Dtype>::Reshape(
  const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {

	vector<int> tot_loss_shape(0);  // Loss layers output a scalar; 0 axes.
	top[0]->Reshape(tot_loss_shape);

	CHECK_EQ(bottom[0]->count(1), 6) << "Inputs theta must have dimension of 6.";

	N = bottom[0]->shape(0);

	vector<int> loss_shape(3);
	loss_shape[0] = N;
	loss_shape[1] = output_H_;
	loss_shape[2] = output_W_;
	loss_.Reshape(loss_shape);

	vector<int> dtheta_tmp_shape(2);
	dtheta_tmp_shape[0] = N * 6;
	dtheta_tmp_shape[1] = output_H_ * output_W_;
	dtheta_tmp_.Reshape(dtheta_tmp_shape);

	vector<int> all_ones_vec_shape(1);
	all_ones_vec_shape[0] = output_H_ * output_W_;
	all_ones_vec_.Reshape(all_ones_vec_shape);
}
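
The buffers sized above only pay off in the forward/backward passes. As a rough, hedged sketch (not taken from the source), the shapes of `dtheta_tmp_` and `all_ones_vec_` suggest a row-sum via a matrix-vector product with Caffe's standard BLAS wrappers, e.g. to reduce per-pixel contributions into the gradient w.r.t. the six theta parameters:

// Hedged sketch, inferred from the shapes set in Reshape above; not the
// layer's actual code. dtheta_tmp_ is (N*6) x (output_H_*output_W_), so a
// matrix-vector product with a vector of ones sums the per-pixel terms.
caffe_set(all_ones_vec_.count(), Dtype(1), all_ones_vec_.mutable_cpu_data());
caffe_cpu_gemv<Dtype>(CblasNoTrans, N * 6, output_H_ * output_W_,
    Dtype(1), dtheta_tmp_.cpu_data(), all_ones_vec_.cpu_data(),
    Dtype(0), bottom[0]->mutable_cpu_diff());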
Example No. 2
void LossLayer<Dtype>::Reshape(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  CHECK_EQ(bottom[0]->num(), bottom[1]->num())
      << "The data and label should have the same number.";
  vector<int> loss_shape(0);  // Loss layers output a scalar; 0 axes.
  top[0]->Reshape(loss_shape);
}
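
For context, this base-class Reshape only sets up the scalar (0-axis) top blob; a derived layer's Forward then writes the loss into its single element. The snippet below mirrors Caffe's stock EuclideanLossLayer::Forward_cpu and assumes a member blob `diff_` shaped like bottom[0]:

template <typename Dtype>
void EuclideanLossLayer<Dtype>::Forward_cpu(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  const int count = bottom[0]->count();
  // Element-wise difference between prediction and target.
  caffe_sub(count, bottom[0]->cpu_data(), bottom[1]->cpu_data(),
            diff_.mutable_cpu_data());
  Dtype dot = caffe_cpu_dot(count, diff_.cpu_data(), diff_.cpu_data());
  // A 0-axis blob still has count() == 1, so the scalar loss goes into
  // the single element of top[0].
  top[0]->mutable_cpu_data()[0] = dot / bottom[0]->num() / Dtype(2);
}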
Example No. 3
void L1LossLayer<Dtype>::Reshape(
  const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  vector<int> loss_shape(0);  // Loss layers output a scalar; 0 axes.
  top[0]->Reshape(loss_shape);
  
  if (bottom.size() > 1) {
    diff_layer_->Reshape(bottom, diff_top_vec_);
  }
  
  sign_.Reshape(bottom[0]->num(), bottom[0]->channels(),
                bottom[0]->height(), bottom[0]->width());
 
  mask_.Reshape(bottom[0]->num(), bottom[0]->channels(),
                bottom[0]->height(), bottom[0]->width());

  plateau_l2_.ReshapeLike(sum_output_);
  
  if (this->layer_param_.l1_loss_param().l2_per_location()) {
    square_layer_->Reshape(diff_top_vec_, square_top_vec_);
    sum_layer_->Reshape(square_top_vec_, sum_top_vec_);
    sqrt_layer_->Reshape(sum_top_vec_, sqrt_top_vec_);    
    caffe_set(sign_.count()/sign_.channels(), Dtype(1), sign_.mutable_cpu_data());
  }
  
}
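
The internal layers wired up here (difference, square, per-location sum, square root) are presumably chained in the same order during Forward when l2_per_location is set, turning per-channel differences into a per-location L2 norm. A hedged sketch of that chaining, using only the members that appear in the Reshape above:

// Hedged sketch, not taken from the source: the likely Forward-time chaining
// of the helper layers reshaped above.
if (bottom.size() > 1) {
  diff_layer_->Forward(bottom, diff_top_vec_);             // element-wise difference
}
if (this->layer_param_.l1_loss_param().l2_per_location()) {
  square_layer_->Forward(diff_top_vec_, square_top_vec_);  // square each channel
  sum_layer_->Forward(square_top_vec_, sum_top_vec_);      // sum over channels
  sqrt_layer_->Forward(sum_top_vec_, sqrt_top_vec_);       // per-location L2 norm
}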
Example No. 4
void MarginLossLayer<Dtype>::Reshape(
  const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { 
  // LossLayer<Dtype>::Reshape(bottom, top);
  vector<int> loss_shape(0);  // Loss layers output a scalar; 0 axes.
  top[0]->Reshape(loss_shape);
  lambda_ = this->layer_param_.margin_loss_param().lambda();
  power_ = this->layer_param_.margin_loss_param().power();
}
Example No. 5
void L1RegLossLayer<Dtype>::Reshape(
  const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { 
  // LossLayer<Dtype>::Reshape(bottom, top);
  vector<int> loss_shape(0);  // Loss layers output a scalar; 0 axes.
  top[0]->Reshape(loss_shape);
  lambda_ = this->layer_param_.l1_reg_loss_param().lambda();
  dim_ = bottom[0]->count() / bottom[0]->num();
}
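
Only lambda_ and dim_ are cached here; the loss itself would be computed in Forward. A hedged sketch (not taken from the source) of what a matching Forward_cpu could look like for an L1 regularization loss, using Caffe's caffe_cpu_asum:

// Hedged sketch, not the layer's actual code: mean absolute value of the
// input scaled by lambda_, written into the scalar top blob.
template <typename Dtype>
void L1RegLossLayer<Dtype>::Forward_cpu(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  const Dtype asum = caffe_cpu_asum(bottom[0]->count(), bottom[0]->cpu_data());
  top[0]->mutable_cpu_data()[0] = lambda_ * asum / bottom[0]->num();
}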
Example No. 6
void LocLossLayer<Dtype>::Reshape(
  const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {

	vector<int> tot_loss_shape(0);  // Loss layers output a scalar; 0 axes.
	top[0]->Reshape(tot_loss_shape);

	N = bottom[0]->count();

	vector<int> loss_shape(1);
	loss_shape[0] = N;
	loss_.Reshape(loss_shape);
}
Example No. 7
void BatchTripletLossLayer<Dtype>::Reshape(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  vector<int> loss_shape(0);      // Loss layers output a scalar; 0 axes.
  top[0]->Reshape(loss_shape);    // average loss (rank_loss + pair_loss)
  top[1]->Reshape(loss_shape);    // average accuracy rate of all triplets
  if (top.size() == 3) {
    top[2]->Reshape(1, 1, 1, 5);
  }

  int num = bottom[0]->num();
  dist_.Reshape(num, num, 1, 1);
  norm_.Reshape(num, 1, 1, 1);
  aggregator_.reset(new SyncedMemory(num * num * sizeof(Dtype)));
}
Example No. 8
void CRFWithLossLayer<Dtype>::Reshape(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  CHECK_EQ(bottom.size(), 2)
      << "CRFWithLossLayer needs both the feature table and the labels as inputs";
  CHECK_EQ(bottom[0]->count(1, 2), feature_num_)
      << "The state num of the input feature table doesn't match the state-feature "
      << "number of the CRF";
  CHECK_EQ(bottom[0]->count(2), max_seq_length_)
      << "The input feature table must have a length of max_seq_length_ as defined in the CRF layer, "
      << "for a requirement of alignment in blobs";
  CHECK_EQ(bottom[1]->count(2), max_seq_length_)
      << "The input label sequence should be of the same length as the input feature table";
  CHECK_EQ(bottom[1]->count(1, 2), 1)
      << "The label sequence is a one-dimensional vector, with each element referring to a class number";
  
  // Test top: (nbest labels, nbest probs); training top: per-sample (-logProb)
  if (!for_training_) {
    CHECK_EQ(top.size(), 2)
        << "The top should have two blobs: predicted labels and predictive probability";
    vector<int> output_seq_shape = bottom[0]->shape();
    output_seq_shape[1] = nbest_;
    top[0]->Reshape(output_seq_shape);

    vector<int> output_prob_shape = bottom[0]->shape();
    output_prob_shape[1] = nbest_;
    output_prob_shape[2] = 1;
    output_prob_shape[3] = 1;
    top[1]->Reshape(output_prob_shape);
  } else {
    vector<int> loss_shape(1);
    loss_shape[0] = bottom[0]->count(0, 1);
    top[0]->Reshape(loss_shape);
  }

  // Transpose the input bottom blob, which is important for the row-major routine
  Dtype* ptr_buf = buf_bottom_transposed_.mutable_cpu_data();

  // Table size (the size of each per-sample matrix)
  int ts = bottom[0]->count(1);

  // Transpose into the buffer and take the log of each element, producing the
  // log-energy transitions used by the following LogSumExp-style matrix multiplications
  for (int i = 0; i < num_; ++i) {
    caffe_cpu_transpose(feature_num_, max_seq_length_, bottom[0]->cpu_data() + i * ts, ptr_buf + i * ts);
    caffe_log(ts, buf_bottom_transposed_.cpu_data() + i * ts, ptr_buf + i * ts);
  }
}
Example No. 9
void WarpCTCLossLayer<Dtype>::Reshape(
    const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  vector<int> loss_shape(0);  // Loss layers output a scalar; 0 axes.
  top[0]->Reshape(loss_shape);
}
Example No. 10
void DeepHandModelDofLimitLossLayer<Dtype>::Reshape(
  const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  vector<int> loss_shape(0);
  top[0]->Reshape(loss_shape);
}
Example No. 11
void TripletLossLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
  const vector<Blob<Dtype>*>& top) {
  caffe_set(bottom[0]->count(), Dtype(0), bottom_diff_.mutable_cpu_data());
  vector<int> loss_shape(0);
  top[0]->Reshape(loss_shape);
}