bool BlockExpandLayer::init(const LayerMap& layerMap,
                            const ParameterMap& parameterMap) {
  /* Initialize the basic parent class */
  Layer::init(layerMap, parameterMap);

  CHECK_EQ(config_.inputs_size(), 1);
  const BlockExpandConfig& blockConf = config_.inputs(0).block_expand_conf();
  blockH_ = blockConf.block_y();
  blockW_ = blockConf.block_x();
  strideH_ = blockConf.stride_y();
  strideW_ = blockConf.stride_x();
  paddingH_ = blockConf.padding_y();
  paddingW_ = blockConf.padding_x();
  channels_ = blockConf.channels();
  imgSizeH_ = blockConf.img_size_y();
  imgSizeW_ = blockConf.img_size_x();

  std::vector<size_t> strides = {(size_t)strideH_, (size_t)strideW_};
  std::vector<size_t> paddings = {(size_t)paddingH_, (size_t)paddingW_};
  std::vector<size_t> blocks = {(size_t)blockH_, (size_t)blockW_};
  createFunction(forward_,
                 "BlockExpand",
                 FuncConfig()
                     .set("strides", strides)
                     .set("paddings", paddings)
                     .set("blocks", blocks));
  createFunction(backward_,
                 "BlockExpandGrad",
                 FuncConfig()
                     .set("strides", strides)
                     .set("paddings", paddings)
                     .set("blocks", blocks));

  return true;
}
bool CosSimVecMatLayer::init(const LayerMap& layerMap,
                             const ParameterMap& parameterMap) {
  Layer::init(layerMap, parameterMap);

  CHECK_EQ(inputLayers_.size(), 2U);

  size_t dataDim = inputLayers_[0]->getSize();
  size_t numKeys = getSize();
  size_t memoryDim = inputLayers_[1]->getSize();

  CHECK_EQ(dataDim * numKeys, memoryDim) << "Dimension mismatch";

  tmpRow0 = Matrix::create(nullptr,
                           /* height= */ 1,
                           dataDim,
                           /* trans= */ false,
                           useGpu_);
  tmpRow1 = Matrix::create(nullptr,
                           /* height= */ 1,
                           dataDim,
                           /* trans= */ false,
                           useGpu_);
  tmpRow2 = Matrix::create(nullptr,
                           /* height= */ numKeys,
                           1,
                           /* trans= */ false,
                           useGpu_);
  tmpRow3 = Matrix::create(nullptr,
                           /* height= */ numKeys,
                           1,
                           /* trans= */ false,
                           useGpu_);

  tmpMtx0 = Matrix::create(nullptr,
                           /* height= */ numKeys,
                           dataDim,
                           /* trans= */ false,
                           useGpu_);
  tmpMtx1 = Matrix::create(nullptr,
                           /* height= */ numKeys,
                           dataDim,
                           /* trans= */ false,
                           useGpu_);

  CHECK(tmpRow0 && tmpRow1 && tmpRow2 && tmpRow3 && tmpMtx0 && tmpMtx1);

  createFunction(forward_,
                 "CosSimForward",
                 FuncConfig().set("scale", (real)config_.cos_scale()));
  createFunction(backward_,
                 "CosSimBackward",
                 FuncConfig().set("scale", (real)config_.cos_scale()));

  return true;
}
bool ScaleSubRegionLayer::init(const LayerMap& layerMap,
                               const ParameterMap& parameterMap) {
  Layer::init(layerMap, parameterMap);
  CHECK_EQ(static_cast<int>(inputLayers_.size()), 2);
  auto& conf = config_.inputs(0).scale_sub_region_conf();
  value_ = conf.value();

  createFunction(forward_, "ScaleSubRegion", FuncConfig().set("value", value_));
  createFunction(
      backward_, "ScaleSubRegionGrad", FuncConfig().set("value", value_));

  return true;
}
TEST(CrossMapNormalGrad, real) {
  // Sweep a small grid of NCHW shapes and normalization window sizes,
  // comparing the CPU and GPU "CrossMapNormalGrad" implementations.
  for (size_t numSamples : {5}) {
    for (size_t channels : {1, 5}) {
      for (size_t imgSizeH : {5, 33}) {
        for (size_t imgSizeW : {5, 32}) {
          for (size_t size : {1, 3}) {
            VLOG(3) << " numSamples=" << numSamples << " channels=" << channels
                    << " imgSizeH=" << imgSizeH << " imgSizeW=" << imgSizeW
                    << " size=" << size;

            TensorShape shape{numSamples, channels, imgSizeH, imgSizeW};
            CpuGpuFuncCompare test("CrossMapNormalGrad",
                                   FuncConfig()
                                       .set("size", size)
                                       .set("scale", (real)1.5)
                                       .set("pow", (real)0.5));
            // Four float inputs and one float output, all the same shape.
            for (int i = 0; i < 4; ++i) {
              test.addInputs(BufferArg(VALUE_TYPE_FLOAT, shape));
            }
            test.addOutputs(BufferArg(VALUE_TYPE_FLOAT, shape));
            // run Function on both devices and compare results
            test.run();
          }
        }
      }
    }
  }
}
Example #5
0
TEST(Crop, real) {
  // Compare CPU vs GPU implementations of Crop (forward) and CropGrad
  // (backward) over a grid of input shapes.
  for (size_t numSamples : {5, 32}) {
    // NOTE: the channels list previously read {5, 5, 32}; the duplicated
    // 5 ran identical test cases twice, so it is dropped here.
    for (size_t channels : {5, 32}) {
      for (size_t imgSizeH : {5, 33, 100}) {
        for (size_t imgSizeW : {5, 32, 96}) {
          VLOG(3) << " numSamples=" << numSamples << " channels=" << channels
                  << " imgSizeH=" << imgSizeH << " imgSizeW=" << imgSizeW;
          for (bool test_grad : {false, true}) {
            CpuGpuFuncCompare compare(
                test_grad ? "CropGrad" : "Crop",
                FuncConfig()
                    .set<std::vector<uint32_t>>("crop_corner", {0, 1, 1, 1})
                    .set<std::vector<uint32_t>>("crop_shape", {0, 2, 3, 3}));
            TensorShape inDims{numSamples, channels, imgSizeH, imgSizeW};
            // Cropped output: 2 channels, 3x3 spatial, per crop_shape above.
            TensorShape outDims{numSamples, 2, 3, 3};
            // Forward crops in->out; the gradient maps out-shaped grads
            // back to the in-shaped buffer, accumulating with ADD_TO.
            compare.addInputs(
                BufferArg(VALUE_TYPE_FLOAT, test_grad ? outDims : inDims));
            compare.addOutputs(BufferArg(VALUE_TYPE_FLOAT,
                                         test_grad ? inDims : outDims,
                                         test_grad ? ADD_TO : ASSIGN_TO),
                               test_grad ? ADD_TO : ASSIGN_TO);
            compare.run();
          }
        }
      }
    }
  }
}
Example #6
0
bool CosSimLayer::init(const LayerMap& layerMap,
                       const ParameterMap& parameterMap) {
  /* Initialize the basic parent class */
  Layer::init(layerMap, parameterMap);

  CHECK_EQ(inputLayers_.size(), 2LU);

  createFunction(forward_,
                 "CosSimForward",
                 FuncConfig().set("scale", (real)config_.cos_scale()));
  createFunction(backward_,
                 "CosSimBackward",
                 FuncConfig().set("scale", (real)config_.cos_scale()));

  return true;
}
TEST(BlockExpandBackward, real) {
  // Check CPU vs GPU agreement of "BlockExpandGrad" across batch sizes,
  // channels, spatial sizes, block sizes, strides, and paddings.
  for (size_t batchSize : {5}) {
    for (size_t channels : {1, 5}) {
      for (size_t inputHeight : {5, 33}) {
        for (size_t inputWidth : {5, 32}) {
          for (size_t block : {1, 3, 5}) {
            for (size_t stride : {1, 2}) {
              for (size_t padding : {0, 1}) {
                // Number of block positions along one spatial axis:
                // ceil((extent + 2*padding - block) / stride) + 1.
                auto numPositions = [&](size_t extent) {
                  return 1 +
                         (extent + 2 * padding - block + stride - 1) / stride;
                };
                const size_t outputHeight = numPositions(inputHeight);
                const size_t outputWidth = numPositions(inputWidth);

                std::vector<size_t> strideVec = {stride, stride};
                std::vector<size_t> paddingVec = {padding, padding};
                std::vector<size_t> blockVec = {block, block};
                CpuGpuFuncCompare test("BlockExpandGrad",
                                       FuncConfig()
                                           .set("strides", strideVec)
                                           .set("paddings", paddingVec)
                                           .set("blocks", blockVec));

                TensorShape inputShape =
                    TensorShape({batchSize, channels, inputHeight, inputWidth});
                TensorShape outputShape =
                    TensorShape({batchSize,
                                 outputHeight * outputWidth,
                                 channels * block * block});
                // Gradient flows from the expanded blocks back to the
                // image-shaped buffer, which accumulates (ADD_TO).
                test.addInputs(BufferArg(VALUE_TYPE_FLOAT, outputShape));
                test.addOutputs(BufferArg(VALUE_TYPE_FLOAT, inputShape),
                                ADD_TO);
                // run Function on both devices and compare
                test.run();
              }
            }
          }
        }
      }
    }
  }
}