Example #1
ImageBatch Convolution::ActivationBackward(ImageBatch diff, ImageBatch top, ImageBatch bottom, ActivationAlgorithm algorithm) {
  CHECK_EQ(diff.Size(), top.Size()) << "input sizes mismatch";
  CHECK_EQ(diff.Size(), bottom.Size()) << "input sizes mismatch";
  ActivationBackwardOp* op = new ActivationBackwardOp();
  op->closure.algorithm = algorithm;
  return NArray::ComputeOne({diff, top, bottom}, diff.Size(), op);
}
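A minimal usage sketch for ActivationBackward, written in the style of the pooling tests below and assuming the same test fixtures (MinervaSystem, gpu_device, GTest macros) are available; the test name, the data, and the ActivationAlgorithm::kRelu value are illustrative assumptions. For ReLU, the expected gradient is just the upstream diff masked wherever the input was non-positive.
TEST(ActivationBackward, GpuRelu) {
  float bottom_raw[] = {-1, 2, -3, 4};       // activation inputs
  float top_raw[] = {0, 2, 0, 4};            // ReLU outputs, max(x, 0)
  float diff_raw[] = {0.1, 0.2, 0.3, 0.4};   // upstream gradient
  float expected_raw[] = {0, 0.2, 0, 0.4};   // gradient zeroed where input <= 0

  auto& ms = MinervaSystem::Instance();
  Scale size{2, 2, 1, 1};
  shared_ptr<float> bottom_ptr(new float[size.Prod()], [](float* ptr) { delete[] ptr; });
  shared_ptr<float> top_ptr(new float[size.Prod()], [](float* ptr) { delete[] ptr; });
  shared_ptr<float> diff_ptr(new float[size.Prod()], [](float* ptr) { delete[] ptr; });
  memcpy(bottom_ptr.get(), bottom_raw, size.Prod() * sizeof(float));
  memcpy(top_ptr.get(), top_raw, size.Prod() * sizeof(float));
  memcpy(diff_ptr.get(), diff_raw, size.Prod() * sizeof(float));

  ms.SetDevice(gpu_device);
  ImageBatch bottom = NArray::MakeNArray(size, bottom_ptr);
  ImageBatch top = NArray::MakeNArray(size, top_ptr);
  ImageBatch diff = NArray::MakeNArray(size, diff_ptr);

  ImageBatch output = Convolution::ActivationBackward(diff, top, bottom, ActivationAlgorithm::kRelu);
  auto output_ptr = output.Get();
  EXPECT_EQ(output.Size(), size);
  for (int i = 0; i < size.Prod(); ++i) {
    EXPECT_NEAR(output_ptr.get()[i], expected_raw[i], 0.001);
  }
}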
Example #2
ImageBatch Convolution::PoolingBackward(ImageBatch diff, ImageBatch top, ImageBatch bottom, PoolingInfo info) {
  CHECK_EQ(diff.Size(), top.Size()) << "input sizes mismatch";
  CHECK_EQ(diff.GetNumImages(), bottom.GetNumImages()) << "#images mismatch";
  CHECK_EQ(diff.GetNumFeatureMaps(), bottom.GetNumFeatureMaps()) << "#channels mismatch";

  int pooled_height = (bottom.GetHeight() + 2 * info.pad_height - info.height + info.stride_vertical - 1) / info.stride_vertical + 1;
  int pooled_width = (bottom.GetWidth() + 2 * info.pad_width - info.width + info.stride_horizontal - 1) / info.stride_horizontal + 1;
  if ((pooled_height - 1) * info.stride_vertical >= bottom.GetHeight() + info.pad_height) {
    --pooled_height;
  }
  if ((pooled_width - 1) * info.stride_horizontal >= bottom.GetWidth() + info.pad_width) {
    --pooled_width;
  }

  CHECK_EQ(top.GetHeight(), pooled_height) << "height mismatch";
  CHECK_EQ(top.GetWidth(), pooled_width) << "width mismatch";

  PoolingBackwardOp* op = new PoolingBackwardOp();
  op->closure = {
    info.algorithm,
    info.height,
    info.width,
    info.stride_vertical,
    info.stride_horizontal,
    info.pad_height,
    info.pad_width
  };
  return NArray::ComputeOne({diff, top, bottom}, bottom.Size(), op);
}
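As a quick sanity check of the size arithmetic against the first test below (4x4 bottom, 3x3 window, stride 2, padding 1): pooled_height = (4 + 2*1 - 3 + 2 - 1) / 2 + 1 = 3, and the boundary check (3 - 1) * 2 = 4 < 4 + 1 leaves it untouched, which matches the 3x3 top that GpuWithInsufficientPadding constructs.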
Example #3
TEST(PoolingBackward, GpuWithInsufficientPadding) {
  float bottom_raw[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
  float top_raw[] = {6, 8, 8, 14, 16, 16, 14, 16, 16};
  float top_diff_raw[] = {0.5, 0.4, 0.1, 0.9, 1.2, 0.3, 1.4, 1.5, 2.1};
  float expected_raw[] = {0, 0, 0, 0, 0, 0.5, 0, 0.5, 0, 0, 0, 0, 0, 2.3, 0, 5.1};

  auto& ms = MinervaSystem::Instance();
  Scale bottom_size{4, 4, 1, 1};
  Scale top_size{3, 3, 1, 1};
  Scale top_diff_size{3, 3, 1, 1};

  shared_ptr<float> bottom_ptr(new float[bottom_size.Prod()], [](float* ptr) { delete[] ptr; });
  shared_ptr<float> top_ptr(new float[top_size.Prod()], [](float* ptr) { delete[] ptr; });
  shared_ptr<float> top_diff_ptr(new float[top_diff_size.Prod()], [](float* ptr) { delete[] ptr; });

  memcpy(bottom_ptr.get(), bottom_raw, bottom_size.Prod() * sizeof(float));
  memcpy(top_ptr.get(), top_raw, top_size.Prod() * sizeof(float));
  memcpy(top_diff_ptr.get(), top_diff_raw, top_diff_size.Prod() * sizeof(float));

  ms.SetDevice(gpu_device);
  ImageBatch bottom = NArray::MakeNArray(bottom_size, bottom_ptr);
  ImageBatch top = NArray::MakeNArray(top_size, top_ptr);
  ImageBatch top_diff = NArray::MakeNArray(top_diff_size, top_diff_ptr);

  PoolingInfo pooling_info(PoolingInfo::Algorithm::kMax, 3, 3, 2, 2, 1, 1);
  ImageBatch output = Convolution::PoolingBackward(top_diff, top, bottom, pooling_info);
  auto output_ptr = output.Get();
  EXPECT_EQ(output.Size(), bottom_size);
  for (int i = 0; i < bottom_size.Prod(); ++i) {
    EXPECT_NEAR(output_ptr.get()[i], expected_raw[i], 0.001);
  }
}
Example #4
TEST(PoolingBackward, GpuWithoutPadding) {
  float top_raw[] = {5, 6, 18, 18};
  float top_diff_raw[] = {1.0, 1.1, 1.2, 1.3};
  float bottom_raw[] = {1, 2, 3, 4, 5, 6, 7, 18, 9};
  float correct_raw[] = {0, 0, 0, 0, 1.0, 1.1, 0, 2.5, 0};
  auto& ms = MinervaSystem::Instance();
  Scale top_size{2, 2, 1, 1};
  Scale correct_size{3, 3, 1, 1};
  shared_ptr<float> top_ptr(new float[top_size.Prod()], [](float* ptr) { delete[] ptr; });
  shared_ptr<float> top_diff_ptr(new float[top_size.Prod()], [](float* ptr) { delete[] ptr; });
  shared_ptr<float> bottom_ptr(new float[correct_size.Prod()], [](float* ptr) { delete[] ptr; });
  memcpy(top_ptr.get(), top_raw, top_size.Prod() * sizeof(float));
  memcpy(top_diff_ptr.get(), top_diff_raw, top_size.Prod() * sizeof(float));
  memcpy(bottom_ptr.get(), bottom_raw, correct_size.Prod() * sizeof(float));
  ImageBatch top = NArray::MakeNArray(top_size, top_ptr);
  ImageBatch top_diff = NArray::MakeNArray(top_size, top_diff_ptr);
  ImageBatch bottom = NArray::MakeNArray(correct_size, bottom_ptr);
  PoolingInfo pooling_info(PoolingInfo::Algorithm::kMax, 2, 2, 1, 1);
  ms.SetDevice(gpu_device);
  ImageBatch output = Convolution::PoolingBackward(top_diff, top, bottom, pooling_info);
  auto output_ptr = output.Get();
  EXPECT_EQ(output.Size(), correct_size);
  for (int i = 0; i < correct_size.Prod(); ++i) {
    EXPECT_NEAR(output_ptr.get()[i], correct_raw[i], 0.001);
  }
}
Example #5
ImageBatch Convolution::ConvBackwardData(ImageBatch diff, ImageBatch bottom, Filter filter, ConvInfo info) {
  CHECK_EQ(diff.GetNumFeatureMaps(), filter.GetNumOutputs()) << "#output channels mismatch";
  /*
   * We can't recover the bottom size from the top size when (bottom + 2*pad - filter) % stride != 0
  Scale new_size {
    (diff.GetWidth() - 1) * info.stride_horizontal + filter.GetWidth() - 2 * info.pad_width,
    (diff.GetHeight() - 1) * info.stride_vertical + filter.GetHeight() - 2 * info.pad_height,
    filter.GetNumInputs(),
    diff.GetNumImages()
  };
  */
  ConvBackwardDataOp* op = new ConvBackwardDataOp();
  op->closure = {
    info.pad_height,
    info.pad_width,
    info.stride_vertical,
    info.stride_horizontal
  };
  return NArray::ComputeOne({diff, filter}, bottom.Size(), op);
}
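The explicit bottom.Size() argument matters because the forward shape map is not invertible: with a 3-wide filter, stride 2, and no padding, for example, bottoms of width 5 and width 6 both yield a top of width 2 (since (5 - 3) / 2 + 1 = (6 - 3) / 2 + 1 = 2 under integer division), so the backward pass has to be told which bottom shape to reproduce.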
Example #6
ImageBatch Convolution::ActivationForward(ImageBatch src, ActivationAlgorithm algorithm) {
  ActivationForwardOp* op = new ActivationForwardOp();
  op->closure.algorithm = algorithm;
  return NArray::ComputeOne({src}, src.Size(), op);
}
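A companion forward sketch under the same assumptions as the ActivationBackward sketch above (existing test fixtures, an ActivationAlgorithm::kRelu value); ReLU should simply clamp negative inputs to zero.
TEST(ActivationForward, GpuRelu) {
  float bottom_raw[] = {-1, 2, -3, 4};
  float expected_raw[] = {0, 2, 0, 4};       // max(x, 0)

  auto& ms = MinervaSystem::Instance();
  Scale size{2, 2, 1, 1};
  shared_ptr<float> bottom_ptr(new float[size.Prod()], [](float* ptr) { delete[] ptr; });
  memcpy(bottom_ptr.get(), bottom_raw, size.Prod() * sizeof(float));

  ms.SetDevice(gpu_device);
  ImageBatch bottom = NArray::MakeNArray(size, bottom_ptr);
  ImageBatch output = Convolution::ActivationForward(bottom, ActivationAlgorithm::kRelu);
  auto output_ptr = output.Get();
  EXPECT_EQ(output.Size(), size);
  for (int i = 0; i < size.Prod(); ++i) {
    EXPECT_NEAR(output_ptr.get()[i], expected_raw[i], 0.001);
  }
}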
Example #7
ImageBatch Convolution::SoftmaxBackward(ImageBatch diff, ImageBatch top, SoftmaxAlgorithm algorithm) {
  CHECK_EQ(diff.Size(), top.Size()) << "input sizes mismatch";
  SoftmaxBackwardOp* op = new SoftmaxBackwardOp();
  op->closure.algorithm = algorithm;
  return NArray::ComputeOne({diff, top}, diff.Size(), op);
}
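For reference, the gradient this backward pass should produce for a softmax output y and upstream gradient dy is the usual dx_i = y_i * (dy_i - sum_j dy_j * y_j), taken per instance or per channel depending on the algorithm. For example, with y = (0.1192, 0.8808) and dy = (1, 0), the weighted sum is 0.1192 and dx is approximately (0.1050, -0.1050).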
Example #8
ImageBatch Convolution::SoftmaxForward(ImageBatch src, SoftmaxAlgorithm algorithm) {
  SoftmaxForwardOp* op = new SoftmaxForwardOp();
  op->closure.algorithm = algorithm;
  return NArray::ComputeOne({src}, src.Size(), op);
}
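A minimal sketch following the same setup as the pooling tests, assuming SoftmaxAlgorithm exposes a kChannel value and that Scale is laid out as {width, height, channels, images}; with a single 1x1 pixel holding (1, 3) across two channels, the channel-wise softmax is (e^1, e^3) normalized, roughly (0.1192, 0.8808).
TEST(SoftmaxForward, GpuChannel) {
  float bottom_raw[] = {1, 3};
  float expected_raw[] = {0.1192, 0.8808};   // exp(x) / sum(exp(x)) over the two channels

  auto& ms = MinervaSystem::Instance();
  Scale size{1, 1, 2, 1};                    // 1x1 spatial, 2 channels, 1 image
  shared_ptr<float> bottom_ptr(new float[size.Prod()], [](float* ptr) { delete[] ptr; });
  memcpy(bottom_ptr.get(), bottom_raw, size.Prod() * sizeof(float));

  ms.SetDevice(gpu_device);
  ImageBatch bottom = NArray::MakeNArray(size, bottom_ptr);
  ImageBatch output = Convolution::SoftmaxForward(bottom, SoftmaxAlgorithm::kChannel);
  auto output_ptr = output.Get();
  EXPECT_EQ(output.Size(), size);
  for (int i = 0; i < size.Prod(); ++i) {
    EXPECT_NEAR(output_ptr.get()[i], expected_raw[i], 0.001);
  }
}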
Example #9
ImageBatch Convolution::LRNBackward(ImageBatch bottom_data, ImageBatch top_data, ImageBatch scale, ImageBatch top_diff, int local_size, float alpha, float beta) {
  LRNBackwardOp* op = new LRNBackwardOp();
  op->closure = {local_size, alpha, beta, bottom_data.Size()};
  return NArray::ComputeOne({bottom_data, top_data, scale, top_diff}, bottom_data.Size(), op);
}
Example #10
ImageBatch Convolution::LRNForward(ImageBatch src, ImageBatch scale, int local_size, float alpha, float beta) {
  LRNForwardOp* op = new LRNForwardOp();
  op->closure = {local_size, alpha, beta, src.Size()};
  return NArray::ComputeOne({src, scale}, src.Size(), op);
}
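Both LRN entry points follow the usual cross-channel local response normalization, roughly y_i = x_i / (k + (alpha / local_size) * sum over the window of x_j^2)^beta; the exact k and alpha scaling conventions depend on the backend, and scale is presumably the cached per-element denominator from the forward pass that LRNBackward reuses instead of recomputing the window sums.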