Example #1
 SumLayerTest()
     : blob_bottom_(new Blob<Dtype>(1, 100, 1, 1)),
       blob_top_(new Blob<Dtype>()) {
   Caffe::set_random_seed(1701);
   // fill the values
   FillerParameter filler_param;
   GaussianFiller<Dtype> filler(filler_param);
   filler.Fill(this->blob_bottom_);
   blob_bottom_vec_.push_back(blob_bottom_);
   blob_top_vec_.push_back(blob_top_);
 }
Example #2
template <typename Dtype>
void ScaleLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  const ScaleParameter& param = this->layer_param_.scale_param();
  if (bottom.size() == 1 && this->blobs_.size() > 0) {
    LOG(INFO) << "Skipping parameter initialization";
  } else if (bottom.size() == 1) {
    // scale is a learned parameter; initialize it
    axis_ = bottom[0]->CanonicalAxisIndex(param.axis());
    const int num_axes = param.num_axes();
    CHECK_GE(num_axes, -1) << "num_axes must be non-negative, "
                           << "or -1 to extend to the end of bottom[0]";
    if (num_axes >= 0) {
      CHECK_GE(bottom[0]->num_axes(), axis_ + num_axes)
          << "scale blob's shape extends past bottom[0]'s shape when applied "
          << "starting with bottom[0] axis = " << axis_;
    }
    this->blobs_.resize(1);
    const vector<int>::const_iterator& shape_start =
        bottom[0]->shape().begin() + axis_;
    const vector<int>::const_iterator& shape_end =
        (num_axes == -1) ? bottom[0]->shape().end() : (shape_start + num_axes);
    vector<int> scale_shape(shape_start, shape_end);
    this->blobs_[0].reset(new Blob<Dtype>(scale_shape));
    FillerParameter filler_param(param.filler());
    if (!param.has_filler()) {
      // Default to unit (1) filler for identity operation.
      filler_param.set_type("constant");
      filler_param.set_value(1);
    }
    shared_ptr<Filler<Dtype> > filler(GetFiller<Dtype>(filler_param));
    filler->Fill(this->blobs_[0].get());
  }
  if (param.bias_term()) {
    LayerParameter layer_param(this->layer_param_);
    layer_param.set_type("Bias");
    BiasParameter* bias_param = layer_param.mutable_bias_param();
    bias_param->set_axis(param.axis());
    if (bottom.size() > 1) {
      bias_param->set_num_axes(bottom[1]->num_axes());
    } else {
      bias_param->set_num_axes(param.num_axes());
    }
    bias_param->mutable_filler()->CopyFrom(param.bias_filler());
    bias_layer_ = LayerRegistry<Dtype>::CreateLayer(layer_param);
    bias_bottom_vec_.resize(1);
    bias_bottom_vec_[0] = bottom[0];
    bias_layer_->SetUp(bias_bottom_vec_, top);
    bias_param_id_ = this->blobs_.size();
    this->blobs_.resize(bias_param_id_ + 1);
    this->blobs_[bias_param_id_] = bias_layer_->blobs()[0];
    bias_propagate_down_.resize(1, false);
  }
  this->param_propagate_down_.resize(this->blobs_.size(), true);
}
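For orientation, a minimal usage sketch of the single-bottom case above (hedged: `bottom_vec` and `top_vec` are assumed test fixtures, not part of this listing). With `bias_term` set, the layer ends up with two parameter blobs, the scale and the bias:

LayerParameter layer_param;
ScaleParameter* scale_param = layer_param.mutable_scale_param();
scale_param->set_axis(1);          // apply the scale starting at the channel axis
scale_param->set_num_axes(1);      // one learned factor per channel
scale_param->set_bias_term(true);  // also set up the internal BiasLayer
ScaleLayer<Dtype> layer(layer_param);
layer.SetUp(bottom_vec, top_vec);
// blobs()[0] is the scale (all 1s by default, via the constant filler above);
// blobs()[1] is the bias blob shared with the internal BiasLayer.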
Example #3
 EuclideanLossLayerTest()
     : blob_bottom_data_(new Blob<Dtype>(10, 5, 1, 1)),
       blob_bottom_label_(new Blob<Dtype>(10, 5, 1, 1)) {
   // fill the values
   FillerParameter filler_param;
   GaussianFiller<Dtype> filler(filler_param);
   filler.Fill(this->blob_bottom_data_);
   blob_bottom_vec_.push_back(blob_bottom_data_);
   filler.Fill(this->blob_bottom_label_);
   blob_bottom_vec_.push_back(blob_bottom_label_);
 }
Example #4
 FlattenLayerTest()
     : blob_bottom_(new Blob<Dtype>(2, 3, 6, 5)),
       blob_top_(new Blob<Dtype>()) {
   Caffe::set_random_seed(1701, Caffe::GetDefaultDevice());
   // fill the values
   FillerParameter filler_param;
   GaussianFiller<Dtype> filler(filler_param);
   filler.Fill(this->blob_bottom_);
   blob_bottom_vec_.push_back(blob_bottom_);
   blob_top_vec_.push_back(blob_top_);
 }
Example #5
TYPED_TEST(UpsampleLayerTest, TestForwardFromPoolOddShape) {
  typedef typename TypeParam::Dtype Dtype;
  int kernel_w = 2;
  int kernel_h = 2;
  Blob<Dtype>* input_blob = new Blob<Dtype>();
  input_blob->Reshape(2, 3, 5, 4);
  FillerParameter filler_param;
  GaussianFiller<Dtype> filler(filler_param);
  filler.Fill(input_blob);
  std::vector<Blob<Dtype>*> pool_bottom_vec;
  pool_bottom_vec.push_back(input_blob);
  LayerParameter layer_param;
  PoolingParameter* pooling_param = layer_param.mutable_pooling_param();
  pooling_param->set_kernel_h(kernel_h);
  pooling_param->set_kernel_w(kernel_w);
  pooling_param->set_stride(2);
  pooling_param->set_pool(PoolingParameter_PoolMethod_MAX);
  PoolingLayer<Dtype> pooling_layer(layer_param);
  pooling_layer.SetUp(pool_bottom_vec, this->blob_bottom_vec_);
  EXPECT_EQ(this->blob_bottom_->num(), 2);
  EXPECT_EQ(this->blob_bottom_->channels(), 3);
  EXPECT_EQ(this->blob_bottom_->height(), 3);
  EXPECT_EQ(this->blob_bottom_->width(), 2);
  EXPECT_EQ(this->blob_bottom_mask_->num(), 2);
  EXPECT_EQ(this->blob_bottom_mask_->channels(), 3);
  EXPECT_EQ(this->blob_bottom_mask_->height(), 3);
  EXPECT_EQ(this->blob_bottom_mask_->width(), 2);

  LayerParameter upsample_layer_param;
  UpsampleParameter* upsample_param = upsample_layer_param.mutable_upsample_param();
  upsample_param->set_upsample_h(5);
  upsample_param->set_upsample_w(4);
  UpsampleLayer<Dtype> upsample_layer(upsample_layer_param);
  upsample_layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
  EXPECT_EQ(this->blob_top_->num(), 2);
  EXPECT_EQ(this->blob_top_->channels(), 3);
  EXPECT_EQ(this->blob_top_->height(), 5);
  EXPECT_EQ(this->blob_top_->width(), 4);

  pooling_layer.Forward(pool_bottom_vec, this->blob_bottom_vec_);
  upsample_layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);

  const Dtype* top_data = this->blob_top_->cpu_data();
  const Dtype* pool_bottom_data = input_blob->cpu_data();
  int num_zeros = 0;
  for (int i = 0; i < this->blob_top_->count(); ++i) {
    if (top_data[i] != 0) {
      EXPECT_EQ(top_data[i], pool_bottom_data[i]);
    } else {
      ++num_zeros;
    }
  }
  EXPECT_EQ(num_zeros, (5*4-6)*2*3);
}
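Why the final zero count holds: max-unpooling writes each pooled value back to the argmax position recorded in the pooling mask and leaves every other output element at zero. A rough sketch of that scatter, per channel map (an illustration under assumed names, not the layer's actual code):

// For one pooled map: mask_data[i] is the flat offset, within the unpooled
// map, of the element that won the max; everything else stays 0.
for (int i = 0; i < pooled_map_size; ++i) {
  const int offset = static_cast<int>(mask_data[i]);
  top_data[offset] = pooled_data[i];
}
// So each 5x4 output map keeps 3x2 nonzeros, giving (5*4-6)*2*3 zeros total.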
Example #6
 virtual void SetUp() {
   Caffe::set_random_seed(1701);
   blob_bottom_->Reshape(2, 3, 2, 2);
   blob_bottom_mask_->Reshape(2, 3, 2, 2);
   FillerParameter filler_param;
   GaussianFiller<Dtype> filler(filler_param);
   filler.Fill(this->blob_bottom_);
   blob_bottom_vec_.push_back(blob_bottom_);
   blob_bottom_vec_.push_back(blob_bottom_mask_);
   blob_top_vec_.push_back(blob_top_);
 }
Example #7
 virtual void SetUp() {
   Caffe::set_random_seed(1703);
   blob_bottom_->Reshape(2, 3, 6, 5);
   // fill the values
   FillerParameter filler_param;
   filler_param.set_value(1.);
   ConstantFiller<Dtype> filler(filler_param);
   filler.Fill(this->blob_bottom_);
   blob_bottom_vec_.push_back(blob_bottom_);
   blob_top_vec_.push_back(blob_top_);
 }
Example #8
  void TestBackward(Dtype filler_std) {
    FillerParameter filler_param;
    filler_param.set_std(filler_std);
    GaussianFiller<Dtype> filler(filler_param);
    filler.Fill(this->blob_bottom_);

    LayerParameter layer_param;
    TanHLayer<Dtype> layer(layer_param);
    GradientChecker<Dtype> checker(1e-2, 1e-2, 1701);
    checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_,
        this->blob_top_vec_);
  }
Example #9
TYPED_TEST(NeuronLayerTest, TestPReLUGradient) {
  typedef typename TypeParam::Dtype Dtype;
  LayerParameter layer_param;
  PReLULayer<Dtype> layer(layer_param);
  layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
  FillerParameter filler_param;
  GaussianFiller<Dtype> filler(filler_param);
  filler.Fill(layer.blobs()[0].get());
  GradientChecker<Dtype> checker(1e-2, 1e-3, 1701, 0., 0.01);
  checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_,
      this->blob_top_vec_);
}
Example #10
		CovarianceLayerTest()
			: blob_data_(new Blob<Dtype>(2, 5, 7, 7)),
			blob_top_(new Blob<Dtype>()) {
			// fill the values
			FillerParameter filler_param;
			// set_value() only configures the constant filler; GaussianFiller
			// reads mean/std, so this call has no effect here
			filler_param.set_value(1);
			GaussianFiller<Dtype> filler(filler_param);
			filler.Fill(this->blob_data_);

			blob_bottom_vec_.push_back(blob_data_);
			blob_top_vec_.push_back(blob_top_);
		}
Example #11
TEST_F(PreferEarlyExitRuleTest, LongIfAndReturnInsideBlock)
{
    std::string code = "int test(int a) {\n";
    code += "  int i = 2;\n";
    code += "  if (a) {\n";
    code += filler("i *= 2;\n", 2);
    code += "  }\n";
    code += "  return i;\n";
    code += "}\n";
    testRuleOnCode(new PreferEarlyExitRule(), code, 0, 3, 3, 6, 3,
                   PreferEarlyExitRule::getMessage());
}
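The string filler() helper used here (and again in the loop-based variant further down) is not shown in this listing. Presumably it just repeats a statement enough times to push the if-body past the rule's length threshold; a plausible definition under that assumption:

// Assumed helper, not shown in the original tests: repeat `stmt` `times`
// times so the generated if-body exceeds the rule's line threshold.
std::string filler(const std::string& stmt, int times) {
  std::string result;
  for (int i = 0; i < times; ++i) {
    result += "    " + stmt;
  }
  return result;
}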
Example #12
TYPED_TEST(BlobMathTest, TestAsum) {
  typedef typename TypeParam::Dtype Dtype;

  // Uninitialized Blob should have asum == 0.
  EXPECT_EQ(0, this->blob_->asum_data());
  EXPECT_EQ(0, this->blob_->asum_diff());
  FillerParameter filler_param;
  filler_param.set_min(-3);
  filler_param.set_max(3);
  UniformFiller<Dtype> filler(filler_param);
  filler.Fill(this->blob_);
  Dtype expected_asum = 0;
  const Dtype* data = this->blob_->cpu_data();
  for (int i = 0; i < this->blob_->count(); ++i) {
    expected_asum += std::fabs(data[i]);
  }
  // Do a mutable access on the current device,
  // so that the asum computation is done on that device.
  // (Otherwise, this would only check the CPU asum implementation.)
  switch (TypeParam::device) {
  case Engine::CPU:
    this->blob_->mutable_cpu_data();
    break;
  case Engine::GPU:
    this->blob_->mutable_gpu_data();
    break;
  default:
    LOG(FATAL) << "Unknown device: " << TypeParam::device;
  }
  EXPECT_NEAR(expected_asum, this->blob_->asum_data(),
              this->epsilon_ * expected_asum);
  EXPECT_EQ(0, this->blob_->asum_diff());

  // Check asum_diff too.
  const Dtype kDiffScaleFactor = 7;
  caffe_cpu_scale(this->blob_->count(), kDiffScaleFactor, data,
                  this->blob_->mutable_cpu_diff());
  switch (TypeParam::device) {
  case Engine::CPU:
    this->blob_->mutable_cpu_diff();
    break;
  case Engine::GPU:
    this->blob_->mutable_gpu_diff();
    break;
  default:
    LOG(FATAL) << "Unknown device: " << TypeParam::device;
  }
  EXPECT_NEAR(expected_asum, this->blob_->asum_data(),
              this->epsilon_ * expected_asum);
  const Dtype expected_diff_asum = expected_asum * kDiffScaleFactor;
  EXPECT_NEAR(expected_diff_asum, this->blob_->asum_diff(),
              this->epsilon_ * expected_diff_asum);
}
Example #13
  InnerProductForRegularizeLayerTest()
      : blob_bottom_(new Blob<Dtype>(2, 3, 4, 5)),
        blob_top_(new Blob<Dtype>()),
        blob_top_for_reg_(new Blob<Dtype>()) {
    // fill the values
    FillerParameter filler_param;
    UniformFiller<Dtype> filler(filler_param);
    filler.Fill(this->blob_bottom_);
    blob_bottom_vec_.push_back(blob_bottom_);
    blob_top_vec_.push_back(blob_top_);
    blob_top_vec_.push_back(blob_top_for_reg_);
  }
Example #14
 InnerProductWithRegularizeLayerTest()
     : blob_bottom_(new Blob<Dtype>(10, 5, 3, 3)),
       blob_top_(new Blob<Dtype>()),
       blob_top_loss_(new Blob<Dtype>()) {
   // fill the values
   FillerParameter filler_param;
   UniformFiller<Dtype> filler(filler_param);
   filler.Fill(this->blob_bottom_);
   blob_bottom_vec_.push_back(blob_bottom_);
   blob_top_vec_.push_back(blob_top_);
   blob_top_vec_.push_back(blob_top_loss_);
 }
Example #15
 SplitLayerTest()
     : blob_bottom_(new Blob<Dtype>(2, 3, 6, 5)),
       blob_top_a_(new Blob<Dtype>()),
       blob_top_b_(new Blob<Dtype>()) {
   // fill the values
   FillerParameter filler_param;
   GaussianFiller<Dtype> filler(filler_param);
   filler.Fill(this->blob_bottom_);
   blob_bottom_vec_.push_back(blob_bottom_);
   blob_top_vec_.push_back(blob_top_a_);
   blob_top_vec_.push_back(blob_top_b_);
 }
Example #16
	void InfogainLossLayerTestForwardPerformance(int num_images, int num_channels, int im_width, int im_height) {

		typedef typename TypeParam::Dtype Dtype;
		LayerParameter layer_param;
		InfogainLossLayer<Dtype> layer(layer_param);

		blob_bottom_data_->Reshape(num_images, num_channels, 1, 1);
		blob_bottom_label_->Reshape(num_images, 1, 1, 1);
		blob_bottom_infogain_->Reshape(1, 1, num_channels, num_channels);

		FillerParameter filler_param;
		UniformFiller<Dtype> filler(filler_param);
		filler.Fill(this->blob_bottom_data_);

		for (int i = 0; i < blob_bottom_label_->count(); ++i) {
			blob_bottom_label_->mutable_cpu_data()[i] = caffe_rng_rand() % 5;
		}

		filler_param.set_min(0.1);
		filler_param.set_max(2.0);
		UniformFiller<Dtype> infogain_filler(filler_param);
		infogain_filler.Fill(this->blob_bottom_infogain_);

		blob_bottom_vec_.clear();
		blob_bottom_vec_.push_back(blob_bottom_data_);
		blob_bottom_vec_.push_back(blob_bottom_label_);
		blob_bottom_vec_.push_back(blob_bottom_infogain_);

		layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);

#if defined(USE_CUDA) || defined(USE_OPENCL)
		blob_bottom_data_->mutable_gpu_data();
		blob_bottom_data_->mutable_gpu_diff();
		blob_bottom_label_->mutable_gpu_data();
		blob_bottom_label_->mutable_gpu_diff();
		blob_bottom_infogain_->mutable_gpu_data();
		blob_bottom_infogain_->mutable_gpu_diff();
		blob_top_loss_->mutable_gpu_data();
		blob_top_loss_->mutable_gpu_diff();
#endif

		record r;
		r.type = std::string(typeid(Dtype).name());
		r.num_images = num_images;
		r.num_channels = num_channels;
		r.img_width = im_width;
		r.img_height = im_height;

		BENCH(r, {
			layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
		});
	}
Example #17
 std::string getTestCode(int ifStatementBodyLength)
 {
     const Loop loop = GetParam();
     std::string code = "int test(int a) {\n";
     code += loop.start;
     code += "if (a) {\n";
     code += filler("a *= 2;\n", ifStatementBodyLength);
     code += "}\n";
     code += loop.end;
     code += "return a;\n";
     code += "}\n";
     return code;
 }
Example #18
 PowerLayerTest()
     : blob_bottom_(new Blob<Dtype>(2, 3, 4, 5)),
       blob_top_(new Blob<Dtype>()) {
   Caffe::set_random_seed(1701);
   // fill the values
   FillerParameter filler_param;
   filler_param.set_mean(0.0);
   filler_param.set_std(0.001);
   // set mean/std before constructing the filler: Filler copies its
   // parameter at construction, so later set_* calls would be ignored
   GaussianFiller<Dtype> filler(filler_param);
   filler.Fill(this->blob_bottom_);
   blob_bottom_vec_.push_back(blob_bottom_);
   blob_top_vec_.push_back(blob_top_);
 }
Example #19
	MultinomialLogisticLossLayerTest()
			: blob_bottom_data_(new Blob<Dtype>(10, 5, 1, 1)),
			  blob_bottom_label_(new Blob<Dtype>(10, 1, 1, 1)) {
		// fill the values
		FillerParameter filler_param;
		PositiveUnitballFiller<Dtype> filler(filler_param);
		filler.Fill(this->blob_bottom_data_);
		blob_bottom_vec_.push_back(blob_bottom_data_);
		for (int i = 0; i < blob_bottom_label_->count(); ++i) {
			blob_bottom_label_->mutable_cpu_data()[i] = rand() % 5;
		}
		blob_bottom_vec_.push_back(blob_bottom_label_);
	}
Example #20
 LstmLayerTest()
     : blob_bottom_(new Blob<Dtype>(12, 3, 2, 1)),
       blob_bottom2_(new Blob<Dtype>(12, 1, 1, 1)),
       blob_top_(new Blob<Dtype>()) {
   // fill the values
   FillerParameter filler_param;
   filler_param.set_min(-0.1);
   filler_param.set_max(0.1);
   UniformFiller<Dtype> filler(filler_param);
   filler.Fill(this->blob_bottom_);
   caffe_set<Dtype>(blob_bottom2_->count(), Dtype(0),
                    blob_bottom2_->mutable_cpu_data());
   blob_top_vec_.push_back(blob_top_);
 }
Example #21
static REPF(jtrepzdx) {
    A p,q,x;
    P*wp;
    RZ(a&&w);
    if(SPARSE&AT(w)) {
        wp=PAV(w);
        x=SPA(wp,e);
    }
    else x=jt->fill&&AN(jt->fill)?jt->fill:filler(w);
    RZ(p=repeat(ravel(rect(a)),ravel(stitch(IX(wcr?*(wf+AS(w)):1),num[-1]))));
    RZ(q=irs2(w,x,0L,wcr,0L,jtover));
    R irs2(p,q,0L,1L,wcr+!wcr,jtfrom);
}    /* (dense complex) # (dense or sparse) */
Example #22
 virtual void SetUp() {
   // fill the values
   Caffe::set_random_seed(1701);
   FillerParameter filler_param;
   GaussianFiller<Dtype> filler(filler_param);
   filler.Fill(this->blob_bottom_);
   blob_top_vec_0_.push_back(blob_top_0_);
   blob_top_vec_0_.push_back(blob_top_1_);
   blob_top_vec_1_.push_back(blob_top_0_);
   blob_top_vec_1_.push_back(blob_top_1_);
   blob_top_vec_1_.push_back(blob_top_2_);
   blob_bottom_vec_.push_back(blob_bottom_);
 }
Example #23
 virtual void SetUp() {
   Caffe::set_random_seed(1701);
   blob_bottom_0_->Reshape(2, 7, 3, 3);
   blob_bottom_1_->Reshape(2, 7, 3, 3);
   // fill the values
   FillerParameter filler_param;
   GaussianFiller<Dtype> filler(filler_param);
   filler.Fill(this->blob_bottom_0_);
   filler.Fill(this->blob_bottom_1_);
   blob_bottom_vec_.push_back(blob_bottom_0_);
   blob_bottom_vec_.push_back(blob_bottom_1_);
   blob_top_vec_.push_back(blob_top_);
 }
Example #24
 SoftmaxWithLossLayerTest()
     : blob_bottom_data_(new Blob<Dtype>(10, 5, 1, 1)),
       blob_bottom_label_(new Blob<Dtype>(10, 1, 1, 1)) {
   // fill the values
   FillerParameter filler_param;
   filler_param.set_std(10);
   GaussianFiller<Dtype> filler(filler_param);
   filler.Fill(this->blob_bottom_data_);
   blob_bottom_vec_.push_back(blob_bottom_data_);
   for (int i = 0; i < blob_bottom_label_->count(); ++i) {
     blob_bottom_label_->mutable_cpu_data()[i] = caffe_rng_rand() % 5;
   }
   blob_bottom_vec_.push_back(blob_bottom_label_);
 }
Example #25
TYPED_TEST(LSTMLayerTest, TestGradientNonZeroContBufferSize2) {
  typedef typename TypeParam::Dtype Dtype;
  this->ReshapeBlobs(2, 2);
  FillerParameter filler_param;
  UniformFiller<Dtype> filler(filler_param);
  filler.Fill(&this->blob_bottom_);
  LSTMLayer<Dtype> layer(this->layer_param_);
  GradientChecker<Dtype> checker(1e-2, 1e-3);
  for (int i = 0; i < this->blob_bottom_cont_.count(); ++i) {
    this->blob_bottom_cont_.mutable_cpu_data()[i] = i > 2;
  }
  checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_,
      this->blob_top_vec_, 0);
}
Example #26
  FlowWarpingLayerTest()
      : blob_im_(new Blob<Dtype>(2, 10, 1000, 1000)),
        blob_disp_(new Blob<Dtype>(2, 2, 1000, 1000)),
        blob_top_(new Blob<Dtype>()) {
    // fill the values
    FillerParameter filler_param;
    UniformFiller<Dtype> filler(filler_param);
    filler.Fill(this->blob_im_);
    filler.Fill(this->blob_disp_);
    blob_bottom_vec_.push_back(this->blob_im_);
    blob_bottom_vec_.push_back(this->blob_disp_);
    blob_top_vec_.push_back(blob_top_);
  }
Example #27
 virtual void SetUp() {
   Caffe::set_random_seed(1701);
   blob_bottom_->Reshape(2, 3, 9, 8);
   blob_bottom_2_->Reshape(4, 3, 1024, 765);
   blob_bottom_3_->Reshape(10, 3, 7, 7);
   // fill the values
   FillerParameter filler_param;
   GaussianFiller<Dtype> filler(filler_param);
   filler.Fill(this->blob_bottom_);
   blob_bottom_vec_.push_back(blob_bottom_);
   blob_bottom_vec_2_.push_back(blob_bottom_2_);
   blob_bottom_vec_3_.push_back(blob_bottom_3_);
   blob_top_vec_.push_back(blob_top_);
 }
Example #28
template <typename Dtype>
void BiasLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  if (bottom.size() == 1 && this->blobs_.size() > 0) {
    LOG(INFO) << "Skipping parameter initialization";
  } else if (bottom.size() == 1) {
    // bias is a learned parameter; initialize it
    const BiasParameter& param = this->layer_param_.bias_param();
    const int axis = bottom[0]->CanonicalAxisIndex(param.axis());
    const int num_axes = param.num_axes();
    CHECK_GE(num_axes, -1) << "num_axes must be non-negative, "
                           << "or -1 to extend to the end of bottom[0]";
    if (num_axes >= 0) {
      CHECK_GE(bottom[0]->num_axes(), axis + num_axes)
          << "bias blob's shape extends past bottom[0]'s shape when applied "
          << "starting with bottom[0] axis = " << axis;
    }
    this->blobs_.resize(1);
    const vector<int>::const_iterator& shape_start =
        bottom[0]->shape().begin() + axis;
    const vector<int>::const_iterator& shape_end =
        (num_axes == -1) ? bottom[0]->shape().end() : (shape_start + num_axes);
    vector<int> bias_shape(shape_start, shape_end);
    this->blobs_[0].reset(new Blob<Dtype>(bias_shape));
    shared_ptr<Filler<Dtype> > filler(GetFiller<Dtype>(param.filler()));
    filler->Fill(this->blobs_[0].get());
  }
  this->param_propagate_down_.resize(this->blobs_.size(), true);

#ifdef USE_MLSL
  int ic = bottom[0]->channels();
  int iw = bottom[0]->width();
  int ih = bottom[0]->height();

  int oc = ic; //top[0]->channels();
  int ow = iw; //top[0]->width();
  int oh = ih; //top[0]->height();

  DataType dt = (sizeof(Dtype) == 4)? DT_FLOAT : DT_DOUBLE;
  ComputeOpRegInfo *myRegInfo;
  myRegInfo = new ComputeOpRegInfo(COMP_OP_TYPE_BIAS);
  myRegInfo->SetName(this->layer_param_.name().c_str());
  myRegInfo->AddInputFeatureMap(ic, iw*ih, dt);
  myRegInfo->AddOutputFeatureMap(oc, ow*oh, dt);
  myRegInfo->AddWeights(oc, 1, dt, false);

  myRegInfo->Validate();
  this->layerOp = new ComputeOp(myRegInfo, caffe::internode::data_parallelism);
  delete myRegInfo;
#endif
}
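For contrast with the learned-parameter branch above, a short sketch of the two-bottom form, where the bias values come from bottom[1] and no parameter blob is created (hedged: data_blob, bias_blob, and top_vec are assumed fixtures, not part of this listing):

// With two bottoms, both `bottom.size() == 1` branches above are skipped
// and the bias is read from bottom[1] instead of this->blobs_.
LayerParameter layer_param;
layer_param.mutable_bias_param()->set_axis(1);  // bias spans axes from 1 on
BiasLayer<Dtype> layer(layer_param);
vector<Blob<Dtype>*> bottom_vec;
bottom_vec.push_back(data_blob);  // e.g. N x C x H x W
bottom_vec.push_back(bias_blob);  // e.g. a length-C blob matching axis 1
layer.SetUp(bottom_vec, top_vec);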
Example #29
TYPED_TEST(CropLayerTest, TestCrop5D) {
  typedef typename TypeParam::Dtype Dtype;
  // Add dimension to each bottom for >4D check
  vector<int> bottom_0_shape = this->blob_bottom_0_->shape();
  vector<int> bottom_1_shape = this->blob_bottom_1_->shape();
  bottom_0_shape.push_back(2);
  bottom_1_shape.push_back(1);
  this->blob_bottom_0_->Reshape(bottom_0_shape);
  this->blob_bottom_1_->Reshape(bottom_1_shape);
  FillerParameter filler_param;
  GaussianFiller<Dtype> filler(filler_param);
  filler.Fill(this->blob_bottom_0_);
  filler.Fill(this->blob_bottom_1_);
  // Make layer
  LayerParameter layer_param;
  layer_param.mutable_crop_param()->set_axis(2);
  layer_param.mutable_crop_param()->add_offset(1);
  layer_param.mutable_crop_param()->add_offset(2);
  layer_param.mutable_crop_param()->add_offset(0);
  CropLayer<Dtype> layer(layer_param);
  layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
  layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
  vector<int> bottom_idx = vector<int>(5, 0);
  vector<int> top_idx = vector<int>(5, 0);
  for (int n = 0; n < this->blob_bottom_0_->shape(0); ++n) {
    for (int c = 0; c < this->blob_bottom_0_->shape(1); ++c) {
      for (int z = 0; z < this->blob_bottom_0_->shape(2); ++z) {
        for (int h = 0; h < this->blob_bottom_0_->shape(3); ++h) {
          for (int w = 0; w < this->blob_bottom_0_->shape(4); ++w) {
            if (n < this->blob_top_->shape(0) &&
                c < this->blob_top_->shape(1) &&
                z < this->blob_top_->shape(2) &&
                h < this->blob_top_->shape(3) &&
                w < this->blob_top_->shape(4)) {
              bottom_idx[0] = top_idx[0] = n;
              bottom_idx[1] = top_idx[1] = c;
              bottom_idx[2] = z;
              bottom_idx[3] = h;
              bottom_idx[4] = top_idx[4] = w;
              top_idx[2] = z+1;
              top_idx[3] = h+2;
              // note: despite their names, bottom_idx here indexes the
              // (cropped) top blob and top_idx the offset bottom coordinates
              EXPECT_EQ(this->blob_top_->data_at(bottom_idx),
                  this->blob_bottom_0_->data_at(top_idx));
            }
          }
        }
      }
    }
  }
}
Example #30
A jtrank1ex(J jt,A w,A fs,I mr,AF f1){PROLOG(0041);A y,y0,yw,z;B wb;C*v,*vv;
    I k,mn,n=1,p,*s,wcn,wcr,wf,wk,wr,*ws,wt,yn,yr,*ys,yt;
 RZ(w);
 wt=AT(w);
 if(wt&SPARSE)R sprank1(w,fs,mr,f1);
 wr=AR(w); ws=AS(w); wcr=efr(wr,mr); wf=wr-wcr; wb=ARELATIVE(w);
 if(!wf)R CALL1(f1,w,fs);
 RE(wcn=prod(wcr,wf+ws)); wk=wcn*bp(wt); v=CAV(w)-wk; NEWYW;
 p=wf; s=ws; RE(mn=prod(wf,ws));
 if(AN(w))MOVEYW else RZ(yw=reshape(vec(INT,wcr,ws+wf),filler(w)));
#define VALENCE   1
#define TEMPLATE  0
#include "cr_t.h"
}