Example #1
// Do one forward pass of priorBox layer and check to see if its output
// matches the given result
void doOnePriorBoxTest(size_t feature_map_width,
                       size_t feature_map_height,
                       size_t image_width,
                       size_t image_height,
                       vector<int> min_size,
                       vector<int> max_size,
                       vector<real> aspect_ratio,
                       vector<real> variance,
                       bool use_gpu,
                       MatrixPtr& result) {
  // Setting up the priorbox layer
  TestConfig configt;
  configt.layerConfig.set_type("priorbox");

  configt.inputDefs.push_back({INPUT_DATA, "featureMap", 1, 0});
  LayerInputConfig* input = configt.layerConfig.add_inputs();
  configt.inputDefs.push_back({INPUT_DATA, "image", 1, 0});
  configt.layerConfig.add_inputs();
  PriorBoxConfig* pb = input->mutable_priorbox_conf();
  for (size_t i = 0; i < min_size.size(); i++) pb->add_min_size(min_size[i]);
  for (size_t i = 0; i < max_size.size(); i++) pb->add_max_size(max_size[i]);
  for (size_t i = 0; i < variance.size(); i++) pb->add_variance(variance[i]);
  for (size_t i = 0; i < aspect_ratio.size(); i++)
    pb->add_aspect_ratio(aspect_ratio[i]);

  // Initialize the data layers
  std::vector<DataLayerPtr> dataLayers;
  LayerMap layerMap;
  vector<Argument> datas;
  initDataLayer(
      configt, &dataLayers, &datas, &layerMap, "priorbox", 1, false, use_gpu);
  dataLayers[0]->getOutput().setFrameHeight(feature_map_height);
  dataLayers[0]->getOutput().setFrameWidth(feature_map_width);
  dataLayers[1]->getOutput().setFrameHeight(image_height);
  dataLayers[1]->getOutput().setFrameWidth(image_width);

  // Initialize the layer under test
  std::vector<ParameterPtr> parameters;
  LayerPtr priorboxLayer;
  initTestLayer(configt, &layerMap, &parameters, &priorboxLayer);
  priorboxLayer->forward(PASS_GC);
  checkMatrixEqual(priorboxLayer->getOutputValue(), result);
}
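
A minimal sketch of how this helper might be driven, in the spirit of the surrounding test. The wrapper name and all parameter values below are illustrative assumptions (not taken from the original test), and the expected matrix would still need to be filled with the precomputed prior-box coordinates and variances:

void sketchPriorBoxCall() {
  vector<int> minSize = {30};
  vector<int> maxSize = {60};
  vector<real> aspectRatio;                        // empty: only the default 1:1 box
  vector<real> variance = {0.1f, 0.1f, 0.2f, 0.2f};

  // A 1x1 feature map with one min_size and one max_size yields 2 prior
  // boxes, each stored as 4 corner coordinates followed by the 4 variances.
  MatrixPtr expected = Matrix::create(1, 2 * 8, /* trans */ false, /* useGpu */ false);
  // ... fill `expected` with the reference values here ...

  doOnePriorBoxTest(/* feature_map_width */ 1,
                    /* feature_map_height */ 1,
                    /* image_width */ 300,
                    /* image_height */ 300,
                    minSize,
                    maxSize,
                    aspectRatio,
                    variance,
                    /* use_gpu */ false,
                    expected);
}
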
Example #2
static void getMKLDNNConvConfig(TestConfig& cfg, const testConvDesc& pm) {
  cfg.layerConfig.set_type("mkldnn_conv");
  cfg.layerConfig.set_active_type("relu");
  cfg.layerConfig.set_num_filters(pm.oc);
  cfg.layerConfig.set_size(pm.oc * pm.oh * pm.ow);
  cfg.layerConfig.set_shared_biases(true);
  cfg.inputDefs.push_back(
      {INPUT_DATA,
       "layer_0",
       /* size of input layer= */ size_t(pm.ic * pm.ih * pm.iw),
       /* size of weight= */ size_t(pm.oc * pm.ic * pm.fh * pm.fw / pm.gp)});
  LayerInputConfig* input = cfg.layerConfig.add_inputs();
  ConvConfig* conv = input->mutable_conv_conf();
  conv->set_groups(pm.gp);
  conv->set_img_size(pm.iw);
  conv->set_img_size_y(pm.ih);
  conv->set_output_x(pm.ow);
  conv->set_output_y(pm.oh);
  conv->set_filter_size(pm.fw);
  conv->set_filter_size_y(pm.fh);
  conv->set_channels(pm.ic);
  conv->set_padding(pm.pw);
  conv->set_padding_y(pm.ph);
  conv->set_stride(pm.sw);
  conv->set_stride_y(pm.sh);
  conv->set_dilation(pm.dw);
  conv->set_dilation_y(pm.dh);
  conv->set_caffe_mode(true);
  conv->set_filter_channels(conv->channels() / conv->groups());
  CHECK_EQ(conv->filter_channels() * pm.gp, conv->channels())
      << "channels must be divisible by groups";

  // Effective (dilated) filter sizes, used to verify the descriptor's
  // expected output dimensions below.
  int fh = (pm.fh - 1) * pm.dh + 1;
  int fw = (pm.fw - 1) * pm.dw + 1;
  int ow = outputSize(pm.iw, fw, pm.pw, pm.sw, true);
  int oh = outputSize(pm.ih, fh, pm.ph, pm.sh, true);
  CHECK_EQ(ow, pm.ow) << "output size check failed";
  CHECK_EQ(oh, pm.oh) << "output size check failed";
}
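
A hedged usage sketch for this helper. The descriptor fields are named exactly as the function reads them above; the concrete numbers and the wrapper name are illustrative assumptions, chosen so that the output-size checks pass (with caffeMode the expected size is (in + 2 * pad - effective_filter) / stride + 1):

static void sketchMKLDNNConvConfig() {
  testConvDesc pm;
  pm.gp = 1;                              // groups
  pm.ic = 3;  pm.ih = 14;  pm.iw = 14;    // input channels / height / width
  pm.oc = 8;                              // output channels
  pm.fh = 3;  pm.fw = 3;                  // filter size
  pm.ph = 1;  pm.pw = 1;                  // padding
  pm.sh = 1;  pm.sw = 1;                  // stride
  pm.dh = 1;  pm.dw = 1;                  // dilation (1 = no dilation)
  // (14 + 2 * 1 - 3) / 1 + 1 = 14, so the spatial size is preserved.
  pm.oh = 14; pm.ow = 14;

  TestConfig cfg;
  getMKLDNNConvConfig(cfg, pm);
  // cfg can now be handed to the MKLDNN test driver to compare the
  // "mkldnn_conv" layer against the reference CPU implementation.
}
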
Example #3
static void getMKLDNNBatchNormConfig(TestConfig& cfg,
                                     const testBatchNormDesc& pm) {
  cfg.layerConfig.set_size(pm.ic * pm.ih * pm.iw);
  cfg.layerConfig.set_type("mkldnn_batch_norm");
  cfg.biasSize = pm.ic;
  cfg.inputDefs.push_back(
      {INPUT_DATA,
       "layer_0",
       /* size of input layer= */ size_t(pm.ic * pm.ih * pm.iw),
       /* size of weight= */ size_t(pm.ic)});
  cfg.inputDefs.push_back(
      {INPUT_DATA, "layer_1_moving_mean", 1, size_t(pm.ic)});
  cfg.inputDefs.back().isStatic = true;
  cfg.inputDefs.push_back({INPUT_DATA, "layer_2_moving_var", 1, size_t(pm.ic)});
  cfg.inputDefs.back().isStatic = true;
  LayerInputConfig* input = cfg.layerConfig.add_inputs();
  cfg.layerConfig.set_active_type("relu");
  cfg.layerConfig.add_inputs();
  cfg.layerConfig.add_inputs();
  ImageConfig* img_conf = input->mutable_image_conf();
  img_conf->set_channels(pm.ic);
  img_conf->set_img_size_y(pm.ih);
  img_conf->set_img_size(pm.iw);
}
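
As above, a small sketch of how the batch-norm helper might be called; only the fields the function actually reads (ic, ih, iw) are set, and the wrapper name is hypothetical:

static void sketchMKLDNNBatchNormConfig() {
  testBatchNormDesc pm;
  pm.ic = 16;   // channels; also the length of the moving mean/variance inputs
  pm.ih = 7;
  pm.iw = 7;

  TestConfig cfg;
  getMKLDNNBatchNormConfig(cfg, pm);
  // cfg now describes an "mkldnn_batch_norm" layer with one learned scale
  // per channel plus two static inputs holding the moving statistics.
}
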
Example #4
// Test that the convTrans forward is the same as conv backward
TEST(Layer, convTransLayerFwd) {
    // Setting up conv-trans layer
    TestConfig configt;
    configt.biasSize = 3;
    configt.layerConfig.set_type("exconvt");
    configt.layerConfig.set_num_filters(3);
    configt.layerConfig.set_partial_sum(1);
    configt.layerConfig.set_shared_biases(true);

    configt.inputDefs.push_back({INPUT_DATA, "layer_0", 1024, 384});
    LayerInputConfig* input = configt.layerConfig.add_inputs();
    ConvConfig* conv = input->mutable_conv_conf();
    conv->set_filter_size(2);
    conv->set_filter_size_y(4);
    conv->set_channels(16);
    conv->set_padding(0);
    conv->set_padding_y(1);
    conv->set_stride(2);
    conv->set_stride_y(2);
    conv->set_groups(1);
    conv->set_filter_channels(3 / conv->groups());
    conv->set_img_size(16);
    conv->set_output_x(outputSize(conv->img_size(), conv->filter_size(),
                                  conv->padding(), conv->stride(),
                                  /* caffeMode */ true));
    configt.layerConfig.set_size(conv->img_size() * conv->img_size() *
                                configt.layerConfig.num_filters());
    configt.layerConfig.set_name("convTrans");

    // Initialize the data layers
    std::vector<DataLayerPtr> dataLayers;
    LayerMap layerMap;
    vector<Argument> datas;
    initDataLayer(configt, &dataLayers, &datas, &layerMap, "convTrans",
                  100, false, false);
    // Initialize the layer under test
    std::vector<ParameterPtr> parameters;
    LayerPtr convtLayer;
    initTestLayer(configt, &layerMap, &parameters, &convtLayer);
    convtLayer->getBiasParameter()->zeroMem();
    convtLayer->forward(PASS_GC);

    // Setting up conv-layer config
    TestConfig config;
    config.biasSize = 16;
    config.layerConfig.set_type("exconv");
    config.layerConfig.set_num_filters(16);
    config.layerConfig.set_partial_sum(1);
    config.layerConfig.set_shared_biases(true);

    config.inputDefs.push_back({INPUT_DATA, "layer_1", 768, 384});
    input = config.layerConfig.add_inputs();
    conv = input->mutable_conv_conf();
    conv->set_filter_size(2);
    conv->set_filter_size_y(4);
    conv->set_channels(3);
    conv->set_padding(0);
    conv->set_padding_y(1);
    conv->set_stride(2);
    conv->set_stride_y(2);
    conv->set_groups(1);
    conv->set_filter_channels(conv->channels() / conv->groups());
    conv->set_img_size(16);
    conv->set_output_x(outputSize(conv->img_size(), conv->filter_size(),
                                  conv->padding(), conv->stride(),
                                  /* caffeMode */ true));
    config.layerConfig.set_size(conv->output_x() * conv->output_x() *
                                config.layerConfig.num_filters());
    config.layerConfig.set_name("conv");

    // Initialize the data layers
    std::vector<DataLayerPtr> dataLayers2;
    LayerMap layerMap2;
    vector<Argument> datas2;
    initDataLayer(config, &dataLayers2, &datas2, &layerMap2, "conv",
                  100, false, false);
    // Initialize the layer under test
    std::vector<ParameterPtr> parameters2;
    LayerPtr convLayer;
    initTestLayer(config, &layerMap2, &parameters2, &convLayer);

    // Sync convLayer and convtLayer parameters
    convLayer->getBiasParameter()->zeroMem();
    convLayer->getParameters()[0]->getBuf(PARAMETER_VALUE)->copyFrom(
            *(convtLayer->getParameters()[0]->getBuf(PARAMETER_VALUE)));

    // Set convLayer outputGrad as convTransLayer input value
    convLayer->forward(PASS_GC);
    convLayer->getOutput().grad->copyFrom(*(dataLayers[0]->getOutputValue()));

    vector<int> callbackFlags(parameters2.size(), 0);
    auto callback = [&](Parameter* para) { ++callbackFlags[para->getID()]; };
    convLayer->backward(callback);

    // Check that the convLayer backward is the same as convTransLayer forward
    checkMatrixEqual(convtLayer->getOutputValue(),
                     dataLayers2[0]->getOutputGrad());
}
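
As a quick consistency check of the hard-coded sizes in this test, the same outputSize() helper can be reused (caffeMode = true, i.e. (imageSize - filterSize + 2 * padding) / stride + 1). The function name is hypothetical and the snippet is purely illustrative arithmetic:

static void sketchConvTransSizes() {
  // Spatial output of the reference conv layer (= input of convTrans):
  int ox = outputSize(/* imageSize */ 16, /* filterSize  */ 2,
                      /* padding   */ 0,  /* stride      */ 2, /* caffeMode */ true);
  int oy = outputSize(/* imageSize */ 16, /* filterSizeY */ 4,
                      /* paddingY  */ 1,  /* strideY     */ 2, /* caffeMode */ true);
  CHECK_EQ(ox, 8);
  CHECK_EQ(oy, 8);
  // conv input  ("layer_1"):                     3 * 16 * 16 = 768
  // conv output = convTrans input ("layer_0"):  16 * ox * oy = 1024
  // shared filter weights:                     16 * 3 * 2 * 4 = 384
}
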