Example #1
// Do one forward pass of the convTrans layer and check that its output
// matches the given result
void doOneConvtTest(size_t imgSize,
                    size_t output_x,
                    size_t stride,
                    size_t padding,
                    size_t filter_size,
                    MatrixPtr& result) {
  TestConfig configt;
  configt.biasSize = 1;
  configt.layerConfig.set_type("exconvt");
  configt.layerConfig.set_num_filters(1);
  configt.layerConfig.set_partial_sum(1);
  configt.layerConfig.set_shared_biases(true);

  configt.inputDefs.push_back(
      {INPUT_DATA, "layer_0", output_x * output_x, filter_size * filter_size});
  LayerInputConfig* input = configt.layerConfig.add_inputs();
  ConvConfig* conv = input->mutable_conv_conf();
  conv->set_filter_size(filter_size);
  conv->set_filter_size_y(filter_size);
  conv->set_channels(1);
  conv->set_padding(padding);
  conv->set_padding_y(padding);
  conv->set_stride(stride);
  conv->set_stride_y(stride);
  conv->set_groups(1);
  conv->set_filter_channels(1);
  conv->set_img_size(imgSize);
  conv->set_output_x(output_x);

  configt.layerConfig.set_size(conv->img_size() * conv->img_size() *
                               configt.layerConfig.num_filters());
  configt.layerConfig.set_name("convTrans");

  std::vector<DataLayerPtr> dataLayers;
  LayerMap layerMap;
  vector<Argument> datas;
  initDataLayer(
      configt, &dataLayers, &datas, &layerMap, "convTrans", 1, false, false);
  dataLayers[0]->getOutputValue()->zeroMem();
  dataLayers[0]->getOutputValue()->add(1.0);

  // initialize the layer under test
  std::vector<ParameterPtr> parameters;
  LayerPtr convtLayer;
  initTestLayer(configt, &layerMap, &parameters, &convtLayer);
  convtLayer->getBiasParameter()->zeroMem();
  convtLayer->getParameters()[0]->zeroMem();
  convtLayer->getParameters()[0]->getBuf(PARAMETER_VALUE)->add(1.0);
  convtLayer->forward(PASS_GC);

  checkMatrixEqual(convtLayer->getOutputValue(), result);
}
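
A minimal caller sketch for the helper above (the scenario and expected values are illustrative, assuming the Matrix::create(height, width, trans, useGpu) factory from the same code base): with a 1x1 input of ones, an all-ones 5x5 filter, stride 1, and no padding, every output pixel receives exactly one filter tap, so the forward pass should yield a 5x5 image of ones.

TEST(Layer, convTransLayerFwdSketch) {
  // Expected output: a 5x5 image of ones (hypothetical scenario; see note above).
  MatrixPtr result = Matrix::create(1, 5 * 5, false, false);
  result->zeroMem();
  result->add(1.0);
  doOneConvtTest(/* imgSize */ 5,
                 /* output_x */ 1,
                 /* stride */ 1,
                 /* padding */ 0,
                 /* filter_size */ 5,
                 result);
}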
Example #2
static void getMKLDNNConvConfig(TestConfig& cfg, const testConvDesc& pm) {
  cfg.layerConfig.set_type("mkldnn_conv");
  cfg.layerConfig.set_active_type("relu");
  cfg.layerConfig.set_num_filters(pm.oc);
  cfg.layerConfig.set_size(pm.oc * pm.oh * pm.ow);
  cfg.layerConfig.set_shared_biases(true);
  cfg.inputDefs.push_back(
      {INPUT_DATA,
       "layer_0",
       /* size of input layer= */ size_t(pm.ic * pm.ih * pm.iw),
       /* size of weight= */ size_t(pm.oc * pm.ic * pm.fh * pm.fw / pm.gp)});
  LayerInputConfig* input = cfg.layerConfig.add_inputs();
  ConvConfig* conv = input->mutable_conv_conf();
  conv->set_groups(pm.gp);
  conv->set_img_size(pm.iw);
  conv->set_img_size_y(pm.ih);
  conv->set_output_x(pm.ow);
  conv->set_output_y(pm.oh);
  conv->set_filter_size(pm.fw);
  conv->set_filter_size_y(pm.fh);
  conv->set_channels(pm.ic);
  conv->set_padding(pm.pw);
  conv->set_padding_y(pm.ph);
  conv->set_stride(pm.sw);
  conv->set_stride_y(pm.sh);
  conv->set_dilation(pm.dw);
  conv->set_dilation_y(pm.dh);
  conv->set_caffe_mode(true);
  conv->set_filter_channels(conv->channels() / conv->groups());
  CHECK_EQ(conv->filter_channels() * pm.gp, conv->channels())
      << "channels must be divisible by groups";

  int fh = (pm.fh - 1) * pm.dh + 1;
  int fw = (pm.fw - 1) * pm.dw + 1;
  int ow = outputSize(pm.iw, fw, pm.pw, pm.sw, true);
  int oh = outputSize(pm.ih, fh, pm.ph, pm.sh, true);
  CHECK_EQ(ow, pm.ow) << "output width check failed";
  CHECK_EQ(oh, pm.oh) << "output height check failed";
}
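
For context, a hedged sketch of the descriptor this helper consumes; the field names are inferred from the pm.* accesses above, and the sample values below are hypothetical. In caffe mode, outputSize() computes (input - dilatedFilter + 2 * padding) / stride + 1, which the sample values satisfy, so the CHECK_EQs above pass.

struct testConvDesc {
  int bs, gp;      // batch size, groups
  int ic, ih, iw;  // input channels / height / width
  int oc, oh, ow;  // output channels / height / width
  int fh, fw;      // filter height / width
  int ph, pw;      // padding
  int sh, sw;      // stride
  int dh, dw;      // dilation
};

TEST(MKLDNNLayer, convConfigSketch) {
  TestConfig cfg;
  // 16x16 input, 3x3 filter, stride 1, no padding, no dilation:
  // (16 - 3 + 0) / 1 + 1 = 14, so oh = ow = 14.
  getMKLDNNConvConfig(cfg, {/*bs*/ 2, /*gp*/ 1,
                            /*ic*/ 8, /*ih*/ 16, /*iw*/ 16,
                            /*oc*/ 16, /*oh*/ 14, /*ow*/ 14,
                            /*fh*/ 3, /*fw*/ 3,
                            /*ph*/ 0, /*pw*/ 0,
                            /*sh*/ 1, /*sw*/ 1,
                            /*dh*/ 1, /*dw*/ 1});
}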
Example #3
// Test that the convTrans forward is the same as conv backward
TEST(Layer, convTransLayerFwd) {
    // Setting up conv-trans layer
    TestConfig configt;
    configt.biasSize = 3;
    configt.layerConfig.set_type("exconvt");
    configt.layerConfig.set_num_filters(3);
    configt.layerConfig.set_partial_sum(1);
    configt.layerConfig.set_shared_biases(true);

    configt.inputDefs.push_back({INPUT_DATA, "layer_0", 1024, 384});
    LayerInputConfig* input = configt.layerConfig.add_inputs();
    ConvConfig* conv = input->mutable_conv_conf();
    conv->set_filter_size(2);
    conv->set_filter_size_y(4);
    conv->set_channels(16);
    conv->set_padding(0);
    conv->set_padding_y(1);
    conv->set_stride(2);
    conv->set_stride_y(2);
    conv->set_groups(1);
    conv->set_filter_channels(3 / conv->groups());
    conv->set_img_size(16);
    conv->set_output_x(outputSize(conv->img_size(), conv->filter_size(),
                                  conv->padding(), conv->stride(),
                                  /* caffeMode */ true));
    configt.layerConfig.set_size(conv->img_size() * conv->img_size() *
                                configt.layerConfig.num_filters());
    configt.layerConfig.set_name("convTrans");

    // initialize the data layer
    std::vector<DataLayerPtr> dataLayers;
    LayerMap layerMap;
    vector<Argument> datas;
    initDataLayer(configt, &dataLayers, &datas, &layerMap, "convTrans",
                  100, false, false);
    // initialize the layer under test
    std::vector<ParameterPtr> parameters;
    LayerPtr convtLayer;
    initTestLayer(configt, &layerMap, &parameters, &convtLayer);
    convtLayer->getBiasParameter()->zeroMem();
    convtLayer->forward(PASS_GC);

    // Setting up conv-layer config
    TestConfig config;
    config.biasSize = 16;
    config.layerConfig.set_type("exconv");
    config.layerConfig.set_num_filters(16);
    config.layerConfig.set_partial_sum(1);
    config.layerConfig.set_shared_biases(true);

    config.inputDefs.push_back({INPUT_DATA, "layer_1", 768, 384});
    input = config.layerConfig.add_inputs();
    conv = input->mutable_conv_conf();
    conv->set_filter_size(2);
    conv->set_filter_size_y(4);
    conv->set_channels(3);
    conv->set_padding(0);
    conv->set_padding_y(1);
    conv->set_stride(2);
    conv->set_stride_y(2);
    conv->set_groups(1);
    conv->set_filter_channels(conv->channels() / conv->groups());
    conv->set_img_size(16);
    conv->set_output_x(outputSize(conv->img_size(), conv->filter_size(),
                                  conv->padding(), conv->stride(),
                                  /* caffeMode */ true));
    config.layerConfig.set_size(conv->output_x() * conv->output_x() *
                                config.layerConfig.num_filters());
    config.layerConfig.set_name("conv");

    // initialize the data layer
    std::vector<DataLayerPtr> dataLayers2;
    LayerMap layerMap2;
    vector<Argument> datas2;
    initDataLayer(config, &dataLayers2, &datas2, &layerMap2, "conv",
                  100, false, false);
    // initialize the layer under test
    std::vector<ParameterPtr> parameters2;
    LayerPtr convLayer;
    initTestLayer(config, &layerMap2, &parameters2, &convLayer);

    // Sync convLayer and convtLayer parameters
    convLayer->getBiasParameter()->zeroMem();
    convLayer->getParameters()[0]->getBuf(PARAMETER_VALUE)->copyFrom(
            *(convtLayer->getParameters()[0]->getBuf(PARAMETER_VALUE)));

    // Set convLayer outputGrad as convTransLayer input value
    convLayer->forward(PASS_GC);
    convLayer->getOutput().grad->copyFrom(*(dataLayers[0]->getOutputValue()));

    vector<int> callbackFlags(parameters2.size(), 0);
    auto callback = [&](Parameter* para) { ++callbackFlags[para->getID()]; };
    convLayer->backward(callback);

    // Check that the convLayer backward is the same as convTransLayer forward
    checkMatrixEqual(convtLayer->getOutputValue(),
                     dataLayers2[0]->getOutputGrad());
}
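
The shape bookkeeping in this test is easy to miss, so here is a hedged sanity sketch (test name hypothetical, using the same outputSize() helper) of why the two configs describe the same tensors: the conv layer's 8x8x16 output matches the convTrans layer's 1024-element input "layer_0", and the conv layer's 3x16x16 input "layer_1" matches the convTrans layer's 768-element output.

TEST(Layer, convTransLayerFwdShapes) {
  const int imgSize = 16, numFilters = 16, channels = 3;
  const int outX = outputSize(imgSize, /* filter */ 2, /* pad */ 0,
                              /* stride */ 2,
                              /* caffeMode */ true);  // (16 - 2) / 2 + 1 = 8
  const int outY = outputSize(imgSize, /* filter */ 4, /* pad */ 1,
                              /* stride */ 2,
                              /* caffeMode */ true);  // (16 - 4 + 2) / 2 + 1 = 8
  EXPECT_EQ(outX * outY * numFilters, 1024);      // convTrans input "layer_0"
  EXPECT_EQ(channels * imgSize * imgSize, 768);   // conv input "layer_1"
}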