void BinaryBoundingLossLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
		const vector<Blob<Dtype>*>& top)
	{
		LossLayer<Dtype>::Reshape(bottom, top);
		vector<int> shape;
		shape.push_back(bottom[0]->num());
		ones_column.Reshape(shape);
		cache_tmp_.ReshapeLike(*bottom[0]);
		square_cache_tmp_.ReshapeLike(*bottom[0]);
		scalar_cache_.ReshapeLike(*bottom[1]);
		ones_.ReshapeLike(*bottom[0]);

		caffe_set(ones_column.count(),
			(Dtype)1.0, 
			ones_column.mutable_cpu_data());
		caffe_set(ones_.count(),
			(Dtype)1.0, ones_.mutable_cpu_data());
	}
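A minimal sketch of the caffe_set pattern the Reshape above uses for its ones_ and ones_column helper blobs. This is illustrative only, assuming the standard Caffe Blob API and math_functions header; it is not part of BinaryBoundingLossLayer.

#include <vector>
#include "caffe/blob.hpp"
#include "caffe/util/math_functions.hpp"

// Allocate a 1-D helper blob and fill it with a constant, as ones_column is
// filled above.
void fill_ones_example() {
  std::vector<int> shape(1, 16);   // a 1-D blob with 16 elements
  caffe::Blob<float> ones;
  ones.Reshape(shape);
  caffe::caffe_set(ones.count(), 1.0f, ones.mutable_cpu_data());
}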
Example #2
void BNLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  frozen_ = this->layer_param_.bn_param().frozen();
  moving_average_ = this->layer_param_.bn_param().moving_average();
  bn_momentum_ = this->layer_param_.bn_param().momentum();
  bn_eps_ = this->layer_param_.bn_param().eps();
  // Initialize parameters
  if (this->blobs_.size() > 0) {
    LOG(INFO) << "Skipping parameter initialization";
  } else {
    if (moving_average_) {
      this->blobs_.resize(4);
    } else {
      this->blobs_.resize(2);
    }
    vector<int> shape;
    shape.push_back(1);
    shape.push_back(bottom[0]->channels());
    // slope
    this->blobs_[0].reset(new Blob<Dtype>(shape));
    shared_ptr<Filler<Dtype> > slope_filler(GetFiller<Dtype>(
        this->layer_param_.bn_param().slope_filler()));
    slope_filler->Fill(this->blobs_[0].get());
    // bias
    this->blobs_[1].reset(new Blob<Dtype>(shape));
    shared_ptr<Filler<Dtype> > bias_filler(GetFiller<Dtype>(
        this->layer_param_.bn_param().bias_filler()));
    bias_filler->Fill(this->blobs_[1].get());
    if (this->blobs_.size() > 2) {
      // moving average mean
      this->blobs_[2].reset(new Blob<Dtype>(shape));
      caffe_set(this->blobs_[2]->count(), Dtype(0),
          this->blobs_[2]->mutable_cpu_data());
      // moving average variance
      this->blobs_[3].reset(new Blob<Dtype>(shape));
      caffe_set(this->blobs_[3]->count(), Dtype(1),
          this->blobs_[3]->mutable_cpu_data());
    }
  }
  this->param_propagate_down_.resize(this->blobs_.size(), true);
}
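The moving-average blobs initialized above (mean to 0, variance to 1) are typically blended with the per-batch statistics using the momentum read in LayerSetUp. Below is a hypothetical sketch of that update using caffe_cpu_axpby (which computes Y = alpha*X + beta*Y); the exact update in this BN fork may differ.

#include "caffe/blob.hpp"
#include "caffe/util/math_functions.hpp"

// moving = momentum * moving + (1 - momentum) * batch  (hypothetical helper)
template <typename Dtype>
void UpdateMovingStat(const caffe::Blob<Dtype>& batch_stat, Dtype momentum,
                      caffe::Blob<Dtype>* moving_stat) {
  caffe::caffe_cpu_axpby(moving_stat->count(), Dtype(1) - momentum,
                         batch_stat.cpu_data(), momentum,
                         moving_stat->mutable_cpu_data());
}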
void ReLUModLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top) {
  hist_res = 256;
  num_sample_ = 0;
  num_pos_.Reshape(1, bottom[0]->channels(), bottom[0]->height(), bottom[0]->width());
  sum_.Reshape(1, bottom[0]->channels(), bottom[0]->height(), bottom[0]->width());
  sum_sq_.Reshape(1, bottom[0]->channels(), bottom[0]->height(), bottom[0]->width());
  hist_.Reshape(1, 1, hist_res * 2 + 1, bottom[0]->channels());
  sum_prod_.Reshape(1, 1, bottom[0]->channels(), bottom[0]->channels());

  caffe_set(sum_.count(), (Dtype)0, sum_.mutable_cpu_data());  
  caffe_set(sum_sq_.count(), (Dtype)0, sum_sq_.mutable_cpu_data());  
  caffe_set(num_pos_.count(), (unsigned)0, num_pos_.mutable_cpu_data());  
  caffe_set(hist_.count(), (unsigned)0, hist_.mutable_cpu_data());  
  caffe_set(sum_prod_.count(), (Dtype)0, sum_prod_.mutable_cpu_data());  

  string filename = this->layer_param_.name() + "-analysis";
  string cmd = "rm " + filename;
  system(cmd.c_str());
  LOG(INFO) << "ReLUMod: LayerSetUp";
}
Example #4
	void ConvNormLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
		const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom)
	{
		caffe_set(conv_top_vec[0]->count(), (Dtype)0, conv_top_vec[0]->mutable_cpu_diff());
		for (int n = 0; n < conv_top_vec[0]->num(); n++)
		{
			caffe_div(norm_top.count(), top[0]->cpu_diff() + top[0]->offset(n),
				norm_top.cpu_data(), conv_top_vec[0]->mutable_cpu_diff()+conv_top_vec[0]->offset(n));
		}

		conv_layer->Backward(conv_top_vec, propagate_down, bottom);
	}
Example #5
void CropLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  const Dtype* top_diff = top[0]->cpu_diff();
  Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();

  if (propagate_down[0]) {
    caffe_set(bottom[0]->count(), static_cast<Dtype>(0), bottom_diff);
    std::vector<int> indices(top[0]->num_axes(), 0);
    crop_copy(bottom, top, offsets.cpu_data(), indices, 0, top_diff,
        bottom_diff, false);
  }
}
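The backward pass above clears the whole bottom diff with caffe_set and then copies gradients back only for the cropped region. A self-contained sketch of that zero-then-copy pattern follows; the helper and its row/dim parameters are hypothetical, not CropLayer code.

#include "caffe/blob.hpp"
#include "caffe/util/math_functions.hpp"

// Zero an entire diff buffer, then copy one row of length `dim` back from the
// top diff.
template <typename Dtype>
void ZeroThenCopyRow(const caffe::Blob<Dtype>& top, int row, int dim,
                     caffe::Blob<Dtype>* bottom) {
  Dtype* bottom_diff = bottom->mutable_cpu_diff();
  caffe::caffe_set(bottom->count(), Dtype(0), bottom_diff);
  caffe::caffe_copy(dim, top.cpu_diff() + row * dim, bottom_diff + row * dim);
}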
void ConvolutionSKLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  CHECK_EQ(4, bottom[0]->num_axes()) << "Input must have 4 axes, "
      << "corresponding to (num, channels, height, width)";
  num_ = bottom[0]->num();
  height_ = bottom[0]->height();
  width_ = bottom[0]->width();
  CHECK_EQ(bottom[0]->channels(), channels_) << "Input size incompatible with"
    " convolution kernel.";
  // TODO: generalize to handle inputs of different shapes.
  for (int bottom_id = 1; bottom_id < bottom.size(); ++bottom_id) {
    CHECK_EQ(num_, bottom[bottom_id]->num()) << "Inputs must have same num.";
    CHECK_EQ(channels_, bottom[bottom_id]->channels())
        << "Inputs must have same channels.";
    CHECK_EQ(height_, bottom[bottom_id]->height())
        << "Inputs must have same height.";
    CHECK_EQ(width_, bottom[bottom_id]->width())
        << "Inputs must have same width.";
  }
  // Shape the tops.
  compute_output_shape();
  for (int top_id = 0; top_id < top.size(); ++top_id) {
    top[top_id]->Reshape(num_, num_output_, height_out_, width_out_);
  }
  if (reverse_dimensions()) {
    conv_in_height_ = height_out_;
    conv_in_width_ = width_out_;
    conv_out_spatial_dim_ = height_ * width_;
  } else {
    conv_in_height_ = height_;
    conv_in_width_ = width_;
    conv_out_spatial_dim_ = height_out_ * width_out_;
  }
  kernel_dim_ = conv_in_channels_ * kernel_h_ * kernel_w_;
  weight_offset_ = conv_out_channels_ * kernel_dim_ / group_ / group_;
  col_offset_ = kernel_dim_ * conv_out_spatial_dim_ / group_;
  output_offset_ = conv_out_channels_ * conv_out_spatial_dim_ / group_;
  // The im2col result buffer will only hold one image at a time to avoid
  // overly large memory usage. In the special case of 1x1 convolution
  // it goes lazily unused to save memory.
  if (reverse_dimensions()) {
    col_buffer_.Reshape(1, kernel_dim_, height_, width_);
  } else {
    col_buffer_.Reshape(1, kernel_dim_, height_out_, width_out_);
  }
  // Set up the all ones "bias multiplier" for adding biases by BLAS
  if (bias_term_) {
    vector<int> bias_multiplier_shape(1, height_out_ * width_out_);
    bias_multiplier_.Reshape(bias_multiplier_shape);
    caffe_set(bias_multiplier_.count(), Dtype(1),
        bias_multiplier_.mutable_cpu_data());
  }
}
Example #7
  virtual void Fill(Blob<Dtype>* blob) {
    CHECK(blob->count());
    Dtype* blob_data = blob->mutable_cpu_data();
    caffe_set(blob->count(), Dtype(0), blob_data);

    const int kernel_area = blob->height() * blob->width();
    int channels = blob->channels();
    int num = blob->num();

    for (int n=0; n < num && n < channels; ++n) {
      Dtype curr_val;
      if (this->filler_param_.diag_val_size() > n)
        curr_val = this->filler_param_.diag_val(n);
      else
        curr_val = 1;
      curr_val /= static_cast<Dtype>(kernel_area);
      caffe_set(kernel_area, curr_val, blob_data + kernel_area * (channels * n + n));
    }
    CHECK_EQ(this->filler_param_.sparse(), -1)
         << "Sparsity not supported by this Filler.";
  }
void deinterpolate_cpu(const Dtype* input, const unsigned int* indices,
    const int input_size, const int output_size, const int channels,
    Dtype* output) {
  caffe_set(output_size*channels, Dtype(0), output);
  for (int c = 0; c < channels; ++c) {
    for (int i = 0; i < input_size; ++i) {
      output[indices[i]] += input[i];
    }
    input += input_size;
    output += output_size;
  }
}
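A toy call to the deinterpolate_cpu routine above (assuming it is templated on Dtype, as in the rest of these examples): three input values scatter-add into a five-element output for a single channel.

#include <cstdio>

void deinterpolate_cpu_demo() {
  const float input[3] = {1.f, 2.f, 3.f};
  const unsigned int indices[3] = {0, 2, 2};   // two inputs land on output[2]
  float output[5];
  deinterpolate_cpu(input, indices, /*input_size=*/3, /*output_size=*/5,
                    /*channels=*/1, output);
  // caffe_set zeroes output first, so output ends up as {1, 0, 5, 0, 0}.
  for (int i = 0; i < 5; ++i) printf("%g ", output[i]);
}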
	void TripletLossLayer<Dtype>::Forward_cpu(
		const vector<Blob<Dtype>*> & bottom, const vector<Blob<Dtype>*> & top){
		int count = bottom[0]->count();//count= n * c * h * w
//		const Dtype* sampleW = bottom[3]->cpu_data(); // 1
		caffe_sub(
			count, 
			bottom[0]->cpu_data(), // a
			bottom[1]->cpu_data(), //p
			diff_ap_.mutable_cpu_data()); // diff_ap_= a - p
		caffe_sub(
			count,
			bottom[0]->cpu_data(), //a
			bottom[2]->cpu_data(), //n
			diff_an_.mutable_cpu_data()); // diff_an_ = a - n
		caffe_sub(
			count, 
			bottom[1]->cpu_data(), //p
			bottom[2]->cpu_data(), //n
			diff_pn_.mutable_cpu_data() // diff_pn_ = p - n
			);
		const int channels = bottom[0]->channels();
		Dtype margin = this->layer_param_.triplet_loss_param().margin();  // the margin alpha
		Dtype loss(0.0);  // accumulates the loss over this batch
		for (int i = 0; i < bottom[0]->num(); ++i) {  // for all triplets
			dist_sq_ap_.mutable_cpu_data()[i] = caffe_cpu_dot(
				channels, diff_ap_.cpu_data() + (i*channels), diff_ap_.cpu_data() + (i * channels));
			dist_sq_an_.mutable_cpu_data()[i] = caffe_cpu_dot(
				channels, diff_an_.cpu_data() + (i * channels), diff_an_.cpu_data() + (i * channels));
			// mdist = hinge loss for one triplet
			Dtype mdist =  std::max(margin + dist_sq_ap_.cpu_data()[i] - dist_sq_an_.cpu_data()[i], Dtype(0.0));
			loss += mdist;
			if(mdist == Dtype(0)){
				caffe_set(channels, Dtype(0), diff_ap_.mutable_cpu_data() + (i * channels));
				caffe_set(channels, Dtype(0), diff_an_.mutable_cpu_data() + (i * channels));
				caffe_set(channels, Dtype(0), diff_pn_.mutable_cpu_data() + (i * channels));
			}
		}
		loss = loss/static_cast<Dtype>(bottom[0]->num())/Dtype(2);
		top[0]->mutable_cpu_data()[0] = loss;
	}
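For reference, each triplet above contributes max(margin + ||a - p||^2 - ||a - n||^2, 0) before the final averaging and division by 2. A tiny standalone numeric check of that hinge term:

#include <algorithm>
#include <cstdio>

float triplet_hinge(float margin, float dist_sq_ap, float dist_sq_an) {
  return std::max(margin + dist_sq_ap - dist_sq_an, 0.0f);
}

void triplet_hinge_demo() {
  // With margin = 1.0: an easy triplet (ap = 0.2, an = 2.0) contributes 0,
  // a hard one (ap = 1.5, an = 0.4) contributes 1.0 + 1.5 - 0.4 = 2.1.
  printf("%g %g\n", triplet_hinge(1.0f, 0.2f, 2.0f),
                    triplet_hinge(1.0f, 1.5f, 0.4f));
}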
void DeconvolutionLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  const Dtype* weight = this->blobs_[0]->cpu_data();
  Dtype* weight_diff = this->blobs_[0]->mutable_cpu_diff();
  if (this->param_propagate_down_[0]) {
    caffe_set(this->blobs_[0]->count(), Dtype(0), weight_diff);
  }
  if (this->bias_term_ && this->param_propagate_down_[1]) {
    caffe_set(this->blobs_[1]->count(), Dtype(0),
        this->blobs_[1]->mutable_cpu_diff());
  }
  for (int i = 0; i < top.size(); ++i) {
    const Dtype* top_diff = top[i]->cpu_diff();
    const Dtype* bottom_data = bottom[i]->cpu_data();
    Dtype* bottom_diff = bottom[i]->mutable_cpu_diff();
    // Bias gradient, if necessary.
    if (this->bias_term_ && this->param_propagate_down_[1]) {
      Dtype* bias_diff = this->blobs_[1]->mutable_cpu_diff();
      for (int n = 0; n < this->num_; ++n) {
        this->backward_cpu_bias(bias_diff, top_diff + n * this->top_dim_);
      }
    }
    if (this->param_propagate_down_[0] || propagate_down[i]) {
      for (int n = 0; n < this->num_; ++n) {
        // Gradient w.r.t. weight. Note that we will accumulate diffs.
        if (this->param_propagate_down_[0]) {
          this->weight_cpu_gemm(top_diff + n * this->top_dim_,
              bottom_data + n * this->bottom_dim_, weight_diff);
        }
        // Gradient w.r.t. bottom data, if necessary, reusing the column buffer
        // we might have just computed above.
        if (propagate_down[i]) {
          this->forward_cpu_gemm(top_diff + n * this->top_dim_, weight,
              bottom_diff + n * this->bottom_dim_,
              this->param_propagate_down_[0]);
        }
      }
    }
  }
}
Example #11
  void BNLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
    top[0]->Reshape(bottom[0]->num(), bottom[0]->channels(),
        bottom[0]->height(), bottom[0]->width());
    if (top.size() > 1) {
        // top blob for batch mean
        top[1]->Reshape(1, C_, 1, 1);
    }
    if (top.size() > 2) {
        // top blob for batch variance
        top[2]->Reshape(1, C_, 1, 1);
    }

    x_norm_.Reshape(bottom[0]->num(), bottom[0]->channels(),
        bottom[0]->height(), bottom[0]->width());

    // mean
    spatial_mean_.Reshape(N_, C_, 1, 1);
    batch_mean_.Reshape(1, C_, 1, 1);
    // variance
    spatial_variance_.Reshape(N_, C_, 1, 1);
    batch_variance_.Reshape(1, C_, 1, 1);
    // buffer blob
    buffer_blob_.Reshape(N_, C_, H_, W_);

    // fill spatial multiplier
    spatial_sum_multiplier_.Reshape(1, 1, H_, W_);
    Dtype* spatial_multipl_data = spatial_sum_multiplier_.mutable_cpu_data();
    caffe_set(spatial_sum_multiplier_.count(), Dtype(1),
        spatial_multipl_data);
    caffe_set(spatial_sum_multiplier_.count(), Dtype(0),
        spatial_sum_multiplier_.mutable_cpu_diff());
    // fill batch multiplier
    batch_sum_multiplier_.Reshape(N_, 1, 1, 1);
    Dtype* batch_multiplier_data = batch_sum_multiplier_.mutable_cpu_data();
    caffe_set(batch_sum_multiplier_.count(), Dtype(1),
        batch_multiplier_data);
    caffe_set(batch_sum_multiplier_.count(), Dtype(0),
        batch_sum_multiplier_.mutable_cpu_diff());
  }
void MovingNormalizeLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
                                          const vector<Blob<Dtype>*>& top) {
  top[0]->ReshapeLike(*bottom[0]);
  squared_.ReshapeLike(*bottom[0]);
  if (top.size() == 2) {
    top[1]->Reshape({ 1 });
  }
  norm_.Reshape(bottom[0]->num(), 1,
                bottom[0]->height(), bottom[0]->width());
  sum_multiplier_.Reshape(bottom[0]->num(), 1,
                          bottom[0]->height(), bottom[0]->width());
  caffe_set(sum_multiplier_.count(), Dtype(1), sum_multiplier_.mutable_cpu_data());
}
void SparseInnerProductLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  // The top shape will M_ * N_
  vector<int> top_shape(2, M_);
  top_shape[1] = N_;
  top[0]->Reshape(top_shape);
  // Set up the bias multiplier
  if (bias_term_) {
    vector<int> bias_shape(1, M_);
    bias_multiplier_.Reshape(bias_shape);
    caffe_set(M_, Dtype(1), bias_multiplier_.mutable_cpu_data());
  }
}
Example #14
 virtual void Fill(Blob<Dtype>* blob) {
   CHECK(blob->count());
   int fan_in = blob->count() / blob->num();
   int fan_out = blob->count() / blob->channels();
   CHECK_EQ(fan_in, fan_out);
   Dtype* blob_data = blob->mutable_cpu_data();
   caffe_set(blob->count(), Dtype(0), blob_data);
   for (int i = 0; i < blob->num(); i++) {
     blob_data[i * blob->channels() + i] = Dtype(1);
   }
   CHECK_EQ(this->filler_param_.sparse(), -1)
     << "Sparsity not supported by this Filler.";
 }
void InnerProductLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
                                       const vector<Blob<Dtype>*>& top) {
    // Figure out the dimensions
    M_ = bottom[0]->num();
    CHECK_EQ(bottom[0]->count() / bottom[0]->num(), K_) << "Input size "
            "incompatible with inner product parameters.";
    top[0]->Reshape(bottom[0]->num(), N_, 1, 1);
    // Set up the bias multiplier
    if (bias_term_) {
        bias_multiplier_.Reshape(1, 1, 1, M_);
        caffe_set(M_, Dtype(1), bias_multiplier_.mutable_cpu_data());
    }
}
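The all-ones bias_multiplier_ filled with caffe_set above exists so the bias can be added to every row of the M_ x N_ output with a single rank-1 GEMM, as in the standard Caffe inner-product forward pass. A hedged sketch of that use (an illustrative helper, not this fork's exact code):

#include "caffe/util/math_functions.hpp"

// output (M x N) += ones_M (M x 1) * bias_N (1 x N)
template <typename Dtype>
void AddBiasRows(int M, int N, const Dtype* ones_M, const Dtype* bias_N,
                 Dtype* output) {
  caffe::caffe_cpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M, N, 1,
      Dtype(1), ones_M, bias_N, Dtype(1), output);
}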
Example #16
void FilterLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  if (propagate_down[bottom.size() - 1]) {
    LOG(FATAL) << this->type()
               << "Layer cannot backpropagate to filter index inputs";
  }
  for (int i = 0; i < top.size(); i++) {
    // bottom[last] is the selector and never needs backpropagation,
    // so we can iterate over the top vector because top.size() == bottom.size() - 1.
    if (propagate_down[i]) {
      const int dim = top[i]->count() / top[i]->shape(0);
      int next_to_backward_offset = 0;
      int batch_offset = 0;
      int data_offset_bottom = 0;
      int data_offset_top = 0;
      for (int n = 0; n < bottom[i]->shape(0); n++) {
        data_offset_bottom = n * dim;
        if (next_to_backward_offset >= indices_to_forward_.size()) {
          // we have already visited all items that were forwarded, so
          // just zero out the remaining ones
          caffe_set(dim, Dtype(0),
              bottom[i]->mutable_cpu_diff() + data_offset_bottom);
        } else {
          batch_offset = indices_to_forward_[next_to_backward_offset];
          if (n != batch_offset) {  // this item was not forwarded
            caffe_set(dim, Dtype(0),
                bottom[i]->mutable_cpu_diff() + data_offset_bottom);
          } else {  // this item was forwarded
            data_offset_top = next_to_backward_offset * dim;
            next_to_backward_offset++;  // point to next forwarded item index
            caffe_cpu_copy(dim, top[i]->mutable_cpu_diff() + data_offset_top,
                bottom[i]->mutable_cpu_diff() + data_offset_bottom);
          }
        }
      }
    }
  }
}
void BilinearPatchFastLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  // Zero the input gradients before accumulating per-sample contributions.
  caffe_set(bottom[0]->num() * bottom[0]->channels() * bottom[0]->height() * bottom[0]->width(),
      Dtype(0.0), bottom[0]->mutable_cpu_diff());
  caffe_set(bottom[1]->num() * bottom[1]->channels() * bottom[1]->height() * bottom[1]->width(),
      Dtype(0.0), bottom[1]->mutable_cpu_diff());

  for (int n = 0; n < bottom[0]->num(); n++) {
    for (int i = 0; i < poolingFieldsNum; i++) {
      if (propagate_down[0]) {
        // Gradient w.r.t. bottom[0].
        multiplyAllChannelsByMask(
            bottom[1]->cpu_data() + bottom[1]->channels() * bottom[1]->height() * bottom[1]->width() * n,
            bottom[2]->cpu_data() + bottom[2]->channels() * bottom[2]->height() * bottom[2]->width() * n,
            i, masked_buffer2.mutable_cpu_data(),
            bottom[1]->height() * bottom[1]->width(), bottom[1]->channels());

        caffe_cpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans,
            bottom[0]->channels(), bottom[0]->width() * bottom[0]->height(), bottom[1]->channels(),
            (Dtype)1.,
            top[0]->cpu_diff() + n * top[0]->channels() + i * bottom[0]->channels() * bottom[1]->channels(),
            masked_buffer2.cpu_data(),
            (Dtype)0., dlda_buffer.mutable_cpu_diff());

        multiplyAllChannelsByMask(
            dlda_buffer.cpu_diff(),
            bottom[2]->cpu_data() + bottom[2]->channels() * bottom[2]->height() * bottom[2]->width() * n,
            i, dlda_buffer.mutable_cpu_diff(),
            bottom[0]->height() * bottom[0]->width(), bottom[0]->channels());

        caffe_add(bottom[0]->channels() * bottom[0]->height() * bottom[0]->width(),
            dlda_buffer.cpu_diff(),
            bottom[0]->cpu_diff() + bottom[0]->channels() * bottom[0]->height() * bottom[0]->width() * n,
            bottom[0]->mutable_cpu_diff() + bottom[0]->channels() * bottom[0]->height() * bottom[0]->width() * n);
      }

      if (propagate_down[1]) {
        // Gradient w.r.t. bottom[1].
        multiplyAllChannelsByMask(
            bottom[0]->cpu_data() + bottom[0]->channels() * bottom[0]->height() * bottom[0]->width() * n,
            bottom[2]->cpu_data() + bottom[2]->channels() * bottom[2]->height() * bottom[2]->width() * n,
            i, masked_buffer1.mutable_cpu_data(),
            bottom[0]->height() * bottom[0]->width(), bottom[0]->channels());

        caffe_cpu_gemm<Dtype>(CblasTrans, CblasNoTrans,
            bottom[1]->channels(), bottom[1]->width() * bottom[1]->height(), bottom[0]->channels(),
            (Dtype)1.,
            top[0]->cpu_diff() + n * top[0]->channels() + i * bottom[0]->channels() * bottom[1]->channels(),
            masked_buffer1.cpu_data(),
            (Dtype)0., dldb_buffer.mutable_cpu_diff());

        multiplyAllChannelsByMask(
            dldb_buffer.cpu_diff(),
            bottom[2]->cpu_data() + bottom[2]->channels() * bottom[2]->height() * bottom[2]->width() * n,
            i, dldb_buffer.mutable_cpu_diff(),
            bottom[1]->height() * bottom[1]->width(), bottom[1]->channels());

        caffe_add(bottom[1]->channels() * bottom[1]->height() * bottom[1]->width(),
            dldb_buffer.cpu_diff(),
            bottom[1]->cpu_diff() + bottom[1]->channels() * bottom[1]->height() * bottom[1]->width() * n,
            bottom[1]->mutable_cpu_diff() + bottom[1]->channels() * bottom[1]->height() * bottom[1]->width() * n);
      }
    }
  }
}
	void TripletClipHingeLossLayer<Dtype>::
		average_hashing(const vector<Blob<Dtype>*>& bottom){

			int batch_size = bottom[0]->num() / frame_num;
			caffe_set(batch_size*dim, Dtype(0.0), ave_or.mutable_cpu_data());
			caffe_set(batch_size*dim, Dtype(0.0), ave_si.mutable_cpu_data());
			caffe_set(batch_size*dim, Dtype(0.0), ave_di.mutable_cpu_data());

			for (int i = 0; i < batch_size; ++i){
				for (int j = 0; j < frame_num; ++j){
					int index = i*frame_num*dim + j*dim;
					caffe_add(dim, bottom[0]->cpu_data() + index,
						ave_or.cpu_data() + i*dim, ave_or.mutable_cpu_data() + i*dim);
					caffe_add(dim, bottom[1]->cpu_data() + index,
						ave_si.cpu_data() + i*dim, ave_si.mutable_cpu_data() + i*dim);
					caffe_add(dim, bottom[2]->cpu_data() + index,
						ave_di.cpu_data() + i*dim, ave_di.mutable_cpu_data() + i*dim);
				}
				caffe_scal(dim, 1 / Dtype(frame_num), ave_or.mutable_cpu_data() + i*dim);
				caffe_scal(dim, 1 / Dtype(frame_num), ave_si.mutable_cpu_data() + i*dim);
				caffe_scal(dim, 1 / Dtype(frame_num), ave_di.mutable_cpu_data() + i*dim);
			}
		}
Example #19
  void BNLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
                               const vector<Blob<Dtype>*>& top) {
    num_ = bottom[0]->num();
    channels_ = bottom[0]->channels();
    height_ = bottom[0]->height();
    width_ = bottom[0]->width();

    top[0]->ReshapeLike(*(bottom[0]));

    broadcast_buffer_.ReshapeLike(*(bottom[0]));
    spatial_statistic_.Reshape(num_, channels_, 1, 1);
    batch_statistic_.Reshape(1, channels_, 1, 1);

    x_norm_.ReshapeLike(*(bottom[0]));
    x_inv_std_.ReshapeLike(batch_statistic_);

    spatial_sum_multiplier_.Reshape(1, 1, height_, width_);
    caffe_set(spatial_sum_multiplier_.count(), Dtype(1),
              spatial_sum_multiplier_.mutable_cpu_data());
    batch_sum_multiplier_.Reshape(num_, 1, 1, 1);
    caffe_set(batch_sum_multiplier_.count(), Dtype(1),
              batch_sum_multiplier_.mutable_cpu_data());
  }
Example #20
void EmbedLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  // Figure out the dimensions
  M_ = bottom[0]->count();
  vector<int> top_shape = bottom[0]->shape();
  top_shape.push_back(N_);
  top[0]->Reshape(top_shape);
  // Set up the bias multiplier
  if (bias_term_) {
    vector<int> bias_shape(1, M_);
    bias_multiplier_.Reshape(bias_shape);
    caffe_set(M_, Dtype(1), bias_multiplier_.mutable_cpu_data());
  }
}
Example #21
void PowerLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom) {
  if (propagate_down[0]) {
    Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
    const int count = bottom[0]->count();
    const Dtype* top_diff = top[0]->cpu_diff();
    if (diff_scale_ == Dtype(0) || power_ == Dtype(1)) {
      caffe_set(count, diff_scale_, bottom_diff);
    } else {
      const Dtype* bottom_data = bottom[0]->cpu_data();
      // Compute dy/dx = scale * power * (shift + scale * x)^(power - 1)
      //               = diff_scale * y / (shift + scale * x)
      if (power_ == Dtype(2)) {
        // Special case for y = (shift + scale * x)^2
        //     -> dy/dx = 2 * scale * (shift + scale * x)
        //              = diff_scale * shift + diff_scale * scale * x
        caffe_cpu_axpby(count, diff_scale_ * scale_, bottom_data,
            Dtype(0), bottom_diff);
        if (shift_ != Dtype(0)) {
          caffe_add_scalar(count, diff_scale_ * shift_, bottom_diff);
        }
      } else if (shift_ == Dtype(0)) {
        // Special case for y = (scale * x)^power
        //     -> dy/dx = scale * power * (scale * x)^(power - 1)
        //              = scale * power * (scale * x)^power * (scale * x)^(-1)
        //              = power * y / x
        const Dtype* top_data = top[0]->cpu_data();
        caffe_div(count, top_data, bottom_data, bottom_diff);
        caffe_scal(count, power_, bottom_diff);
      } else {
        caffe_copy(count, bottom_data, bottom_diff);
        if (scale_ != Dtype(1)) {
          caffe_scal(count, scale_, bottom_diff);
        }
        if (shift_ != Dtype(0)) {
          caffe_add_scalar(count, shift_, bottom_diff);
        }
        const Dtype* top_data = top[0]->cpu_data();
        caffe_div<Dtype>(count, top_data, bottom_diff, bottom_diff);
        if (diff_scale_ != Dtype(1)) {
          caffe_scal(count, diff_scale_, bottom_diff);
        }
      }
    }
    if (diff_scale_ != Dtype(0)) {
      caffe_mul(count, top_diff, bottom_diff, bottom_diff);
    }
  }
}
Example #22
void col2im3d_cpu(const Dtype* data_col, const int channels,
    const int depth, const int height, const int width,
    const int kernel_d, const int kernel_h, const int kernel_w,
    const int pad_d, const int pad_h, const int pad_w,
    const int stride_d, const int stride_h, const int stride_w,
    const int dilation_d, const int dilation_h, const int dilation_w,
    Dtype* data_im) {
  // Implicit dilated patch
  long dil_patch_h = (kernel_h - 1) * dilation_h + 1;
  long dil_patch_w = (kernel_w - 1) * dilation_w + 1;
  long dil_patch_d = (kernel_d - 1) * dilation_d + 1;
  long height_col = (height + 2 * pad_h - dil_patch_h) / stride_h + 1;
  long width_col = (width + 2 * pad_w - dil_patch_w) / stride_w + 1;
  long depth_col = (depth + 2 * pad_d - dil_patch_d) / stride_d + 1;
  long num_kernels = channels * height * width * depth;
  long chunk_len = kernel_h * kernel_w * kernel_d;

  caffe_set(num_kernels, Dtype(0), data_im);

  #ifdef _OPENMP
  #pragma omp parallel for if (channels > 1)
  #endif
  for (long c_im = 0; c_im < channels; ++c_im) {
    for (long c = c_im * chunk_len; c < chunk_len * (c_im + 1); ++c) {
      long w_offset = c % kernel_w;
      long h_offset = (c / kernel_w) % kernel_h;
      long d_offset = (c / kernel_w / kernel_h) % kernel_d;
 
      long dc0 = d_offset * dilation_d - pad_d;
      long hc0 = h_offset * dilation_h - pad_h;
      long wc0 = w_offset * dilation_w - pad_w;
      for (long d = 0; d < depth_col; ++d) {
        long d_pad = d * stride_d + dc0;
        for (long h = 0; h < height_col; ++h) {
          long h_pad = h * stride_h + hc0;
          for (long w = 0; w < width_col; ++w) {
            long w_pad = w * stride_w + wc0;

            if (((unsigned long)h_pad < (unsigned long)height) &&
                ((unsigned long)w_pad < (unsigned long)width) &&
                ((unsigned long)d_pad < (unsigned long)depth)) {
              data_im[((c_im * depth + d_pad) * height + h_pad) * width + w_pad] +=
                data_col[((c * depth_col + d) * height_col + h) * width_col + w];
            }
          }
        }
      }
    }
  }
}
Example #23
void BNLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
    // reshape blob
	top[0]->Reshape(num_, channels_, height_, width_);
	x_norm_.Reshape(num_, channels_, height_, width_);
	x_std_.Reshape(1, channels_, 1, 1);

	// statistic
	spatial_statistic_.Reshape(num_, channels_, 1, 1);
	batch_statistic_.Reshape(1, channels_, 1, 1);

	// buffer blob
	buffer_blob_.Reshape(num_, channels_, height_, width_);

	// fill spatial multiplier
	spatial_sum_multiplier_.Reshape(1, 1, height_, width_);
	Dtype* spatial_multiplier_data = spatial_sum_multiplier_.mutable_cpu_data();
	caffe_set(spatial_sum_multiplier_.count(), Dtype(1), spatial_multiplier_data);
	// fill batch multiplier
	batch_sum_multiplier_.Reshape(num_, 1, 1, 1);
	Dtype* batch_multiplier_data = batch_sum_multiplier_.mutable_cpu_data();
	caffe_set(batch_sum_multiplier_.count(), Dtype(1), batch_multiplier_data);
}
	TYPED_TEST(Scale2LayerTest, TestForward)
	{
		typedef typename TypeParam::Dtype Dtype;
		LayerParameter layer_param;
		ConvolutionParameter* conv_param = layer_param.mutable_convolution_param();
		conv_param->set_num_output(8);
		Scale2Layer<Dtype> layer(layer_param);
		caffe_set(blob_bottom_->count(), (Dtype)1, blob_bottom_->mutable_cpu_data());
		layer.SetUp(blob_bottom_vec_, blob_top_vec_);

		Dtype* param0 = layer.blobs()[0]->mutable_cpu_data();
		caffe_set(layer.blobs()[0]->count(), (Dtype)0, param0);
		param0[1] = 1;
		param0[4] = 1;
		param0[5] = 1;
		param0[9] = 1;
		Dtype* param1 = layer.blobs()[1]->mutable_cpu_data();
		caffe_set(layer.blobs()[1]->count(), (Dtype)-1, param1);
		param1[1] = 3;
		param1[4] = 7;
		param1[5] = 4;
		param1[9] = 5;
		layer.Forward(blob_bottom_vec_, blob_top_vec_);
		const Dtype min_precision = 1e-5;
		int ch = blob_top_->channels();
		int height = blob_top_->height();
		int width = blob_top_->width();
		for (int i = 0; i < blob_top_->count(); i++)
		{
			int ch_idx = (i / width / height) % ch;
			if (ch_idx == 3 || ch_idx == 7 || ch_idx == 4 || ch_idx == 5)
				EXPECT_NEAR(blob_top_->mutable_cpu_data()[i], 1, min_precision);
			else
				EXPECT_NEAR(blob_top_->mutable_cpu_data()[i], -1, min_precision);

		}
	}
Example #25
void LRNLayer<Dtype>::CrossChannelForward_cpu(
    const vector<Blob<Dtype>*>& bottom, vector<Blob<Dtype>*>* top) {
  const Dtype* bottom_data = bottom[0]->cpu_data();
  Dtype* top_data = (*top)[0]->mutable_cpu_data();
  Dtype* scale_data = scale_.mutable_cpu_data();
  // start with the constant value
  for (int i = 0; i < scale_.count(); ++i) {
    scale_data[i] = 1.;
  }
  Blob<Dtype> padded_square(1, channels_ + size_ - 1, height_, width_);
  Dtype* padded_square_data = padded_square.mutable_cpu_data();
  caffe_set(padded_square.count(), Dtype(0), padded_square_data);
  Dtype alpha_over_size = alpha_ / size_;
  // go through the images
  for (int n = 0; n < num_; ++n) {
    // compute the padded square
    caffe_sqr(channels_ * height_ * width_,
        bottom_data + bottom[0]->offset(n),
        padded_square_data + padded_square.offset(0, pre_pad_));
    // Create the first channel scale
    for (int c = 0; c < size_; ++c) {
      caffe_axpy<Dtype>(height_ * width_, alpha_over_size,
          padded_square_data + padded_square.offset(0, c),
          scale_data + scale_.offset(n, 0));
    }
    for (int c = 1; c < channels_; ++c) {
      // copy previous scale
      caffe_copy<Dtype>(height_ * width_,
          scale_data + scale_.offset(n, c - 1),
          scale_data + scale_.offset(n, c));
      // add head
      caffe_axpy<Dtype>(height_ * width_, alpha_over_size,
          padded_square_data + padded_square.offset(0, c + size_ - 1),
          scale_data + scale_.offset(n, c));
      // subtract tail
      caffe_axpy<Dtype>(height_ * width_, -alpha_over_size,
          padded_square_data + padded_square.offset(0, c - 1),
          scale_data + scale_.offset(n, c));
    }
    // for (int i = 0; i < scale_.count(); ++i) {
    // 	if (scale_data[i] < 0 )
    // 	  LOG(FATAL) << "found negative norm term " << scale_data[i] << " @ " << i;
    //   }
  }

  // In the end, compute output
  caffe_powx<Dtype>(scale_.count(), scale_data, -beta_, top_data);
  caffe_mul<Dtype>(scale_.count(), top_data, bottom_data, top_data);
}
Example #26
void MVNLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
                              vector<Blob<Dtype>*>* top) {
    (*top)[0]->Reshape(bottom[0]->num(), bottom[0]->channels(),
                       bottom[0]->height(), bottom[0]->width());
    mean_.Reshape(bottom[0]->num(), bottom[0]->channels(),
                  1, 1);
    variance_.Reshape(bottom[0]->num(), bottom[0]->channels(),
                      1, 1);
    temp_.Reshape(bottom[0]->num(), bottom[0]->channels(),
                  bottom[0]->height(), bottom[0]->width());
    sum_multiplier_.Reshape(1, 1,
                            bottom[0]->height(), bottom[0]->width());
    Dtype* multiplier_data = sum_multiplier_.mutable_cpu_data();
    caffe_set(sum_multiplier_.count(), Dtype(1), multiplier_data);
}
Example #27
 /**
  * Called by SetUp to initialize the weights associated with any top blobs in
  * the loss function. Store non-zero loss weights in the diff blob.
  */
 inline void SetLossWeights(const vector<Blob<Dtype>*>& top) {
   const int num_loss_weights = layer_param_.loss_weight_size();
   if (num_loss_weights) {
     CHECK_EQ(top.size(), num_loss_weights) << "loss_weight must be "
         "unspecified or specified once per top blob.";
     for (int top_id = 0; top_id < top.size(); ++top_id) {
       const Dtype loss_weight = layer_param_.loss_weight(top_id);
       if (loss_weight == Dtype(0)) { continue; }
       this->set_loss(top_id, loss_weight);
       const int count = top[top_id]->count();
       Dtype* loss_multiplier = top[top_id]->mutable_cpu_diff();
       caffe_set(count, loss_weight, loss_multiplier);
     }
   }
 }
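The effect of SetLossWeights above is that a loss top blob's diff is pre-filled with its loss_weight, which the layer's Backward pass then reads as a gradient scale. A small illustrative sketch of that effect (hypothetical helper, standard Blob API assumed):

#include "caffe/blob.hpp"
#include "caffe/util/math_functions.hpp"

template <typename Dtype>
void ShowLossWeightEffect(Dtype loss_weight) {
  caffe::Blob<Dtype> loss_top(1, 1, 1, 1);
  caffe::caffe_set(loss_top.count(), loss_weight, loss_top.mutable_cpu_diff());
  // A loss layer's Backward_cpu typically scales its gradient by this value:
  const Dtype scale = loss_top.cpu_diff()[0];   // == loss_weight
  (void)scale;
}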
void SoftmaxLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  softmax_axis_ =
      bottom[0]->CanonicalAxisIndex(this->layer_param_.softmax_param().axis());
  top[0]->ReshapeLike(*bottom[0]);
  vector<int> mult_dims(1, bottom[0]->shape(softmax_axis_));
  sum_multiplier_.Reshape(mult_dims);
  Dtype* multiplier_data = sum_multiplier_.mutable_cpu_data();
  caffe_set(sum_multiplier_.count(), Dtype(1), multiplier_data);
  outer_num_ = bottom[0]->count(0, softmax_axis_);
  inner_num_ = bottom[0]->count(softmax_axis_ + 1);
  vector<int> scale_dims = bottom[0]->shape();
  scale_dims[softmax_axis_] = 1;
  scale_.Reshape(scale_dims);
}
Example #29
	void ConvNormLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
		const vector<Blob<Dtype>*>& top)
	{
		CHECK_EQ(4, bottom[0]->num_axes()) << "Input must have 4 axes, "
			<< "corresponding to (num, channels, height, width)";

		conv_layer.reset(new ConvolutionLayer<Dtype>(this->layer_param()));
		conv_top_vec.push_back(&conv_top);
		conv_layer->SetUp(bottom, conv_top_vec);

		LayerParameter conv_param(this->layer_param());
		conv_param.mutable_convolution_param()->set_bias_term(false);
		conv_param.mutable_convolution_param()->mutable_weight_filler()->set_type("constant");
		conv_param.mutable_convolution_param()->mutable_weight_filler()->set_value(1);
		//conv_param.mutable_convolution_param()->set_num_output(1);

		norm_bottom.Reshape(1, 1, bottom[0]->height(), bottom[0]->width());
		caffe_set(norm_bottom.count(), (Dtype)1, norm_bottom.mutable_cpu_data());
		norm_top_vec.push_back(&norm_top);
		norm_bottom_vec.push_back(&norm_bottom);
		
		norm_layer.reset(new ConvolutionLayer<Dtype>(conv_param));
		norm_layer->SetUp(norm_bottom_vec, norm_top_vec);
		norm_layer->Forward(norm_bottom_vec, norm_top_vec);

		bool bias_term = this->layer_param_.convolution_param().bias_term();
		if (this->blobs_.size() > 0) {
			LOG(INFO) << "Skipping parameter initialization";
		}
		else {
			if (bias_term) {
				this->blobs_.resize(2);
			}
			else {
				this->blobs_.resize(1);
			}
			this->blobs_[0].reset(new Blob<Dtype>(conv_layer->blobs()[0]->shape()));
			this->blobs_[0]->ShareData(*conv_layer->blobs()[0].get());
			this->blobs_[0]->ShareDiff(*conv_layer->blobs()[0].get());

			if (bias_term)
			{
				this->blobs_[1].reset(new Blob<Dtype>(conv_layer->blobs()[1]->shape()));
				this->blobs_[1]->ShareData(*conv_layer->blobs()[1].get());
				this->blobs_[1]->ShareDiff(*conv_layer->blobs()[1].get());
			}
		}
	}
void SubStackFixLayer<Dtype>::Forward_cpu(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->cpu_data();
  Dtype* top_data = top[0]->mutable_cpu_data();
  const int inner_num =
      bottom[0]->height() * bottom[0]->width() * bottom[0]->channels();
  for (int e1 = 0; e1 < sweepern_; ++e1) {
    caffe_set(inner_num, Dtype(0), top_data);
    for (int e2 = 0; e2 < sweepern_; ++e2) {
      if (e1 != e2) {
        caffe_axpy(inner_num, Dtype(1.0), bottom_data, top_data);
      }
      bottom_data += bottom[0]->offset(1, 0);
    }
    top_data += top[0]->offset(1, 0);
  }
}