TYPED_TEST(DummyDataLayerTest, TestOneTopConstant) {
  Caffe::set_mode(Caffe::CPU);
  LayerParameter param;
  DummyDataParameter* dummy_data_param = param.mutable_dummy_data_param();
  dummy_data_param->add_num(5);
  dummy_data_param->add_channels(3);
  dummy_data_param->add_height(2);
  dummy_data_param->add_width(4);
  this->blob_top_vec_.resize(1);
  DummyDataLayer<TypeParam> layer(param);
  layer.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_);
  EXPECT_EQ(this->blob_top_a_->num(), 5);
  EXPECT_EQ(this->blob_top_a_->channels(), 3);
  EXPECT_EQ(this->blob_top_a_->height(), 2);
  EXPECT_EQ(this->blob_top_a_->width(), 4);
  EXPECT_EQ(this->blob_top_b_->count(), 0);
  EXPECT_EQ(this->blob_top_c_->count(), 0);
  for (int i = 0; i < this->blob_top_vec_.size(); ++i) {
    for (int j = 0; j < this->blob_top_vec_[i]->count(); ++j) {
      EXPECT_EQ(0, this->blob_top_vec_[i]->cpu_data()[j]);
    }
  }
  layer.Forward(this->blob_bottom_vec_, &this->blob_top_vec_);
  for (int i = 0; i < this->blob_top_vec_.size(); ++i) {
    for (int j = 0; j < this->blob_top_vec_[i]->count(); ++j) {
      EXPECT_EQ(0, this->blob_top_vec_[i]->cpu_data()[j]);
    }
  }
}
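The all-zero expectation above relies on DummyDataLayer's default constant filler. As a hedged sketch (assuming the stock caffe.proto, where DummyDataParameter carries repeated FillerParameter data_filler entries), a non-zero constant could be requested before SetUp like this:

// Illustrative only: fill every top element with 7 instead of the default 0.
FillerParameter* data_filler_param = dummy_data_param->add_data_filler();
data_filler_param->set_type("constant");
data_filler_param->set_value(7);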
Example #2
template <typename Dtype>
shared_ptr<Layer<Dtype> > GetPoolingLayer(const LayerParameter& param) {
  PoolingParameter_Engine engine = param.pooling_param().engine();
  if (engine == PoolingParameter_Engine_DEFAULT) {
    engine = PoolingParameter_Engine_CAFFE;
#ifdef USE_CUDNN
    engine = PoolingParameter_Engine_CUDNN;
#endif
  }
  if (engine == PoolingParameter_Engine_CAFFE) {
    return shared_ptr<Layer<Dtype> >(new PoolingLayer<Dtype>(param));
#ifdef USE_CUDNN
  } else if (engine == PoolingParameter_Engine_CUDNN) {
    PoolingParameter p_param = param.pooling_param();
    if (p_param.pad() || p_param.pad_h() || p_param.pad_w() ||
        param.top_size() > 1) {
      LOG(INFO) << "CUDNN does not support padding or multiple tops. "
                << "Using Caffe's own pooling layer.";
      return shared_ptr<Layer<Dtype> >(new PoolingLayer<Dtype>(param));
    }
    return shared_ptr<Layer<Dtype> >(new CuDNNPoolingLayer<Dtype>(param));
#endif
  } else {
    LOG(FATAL) << "Layer " << param.name() << " has unknown engine.";
  }
  return shared_ptr<Layer<Dtype> >();  // unreachable; silences the compiler
}
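A minimal caller sketch for this factory (assuming it is instantiated for float inside the caffe namespace, as in the stock layer_factory.cpp):

LayerParameter param;
param.set_name("pool1");
// Force the native engine so the CUDNN branch above is bypassed.
param.mutable_pooling_param()->set_engine(PoolingParameter_Engine_CAFFE);
shared_ptr<Layer<float> > pool = GetPoolingLayer<float>(param);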
Example #3
TYPED_TEST(HDF5DataLayerTest, TestSkip) {
  typedef typename TypeParam::Dtype Dtype;
  LayerParameter param;
  param.add_top("data");
  param.add_top("label");

  HDF5DataParameter* hdf5_data_param = param.mutable_hdf5_data_param();
  int batch_size = 5;
  hdf5_data_param->set_batch_size(batch_size);
  hdf5_data_param->set_source(*(this->filename));

  Caffe::set_solver_count(8);
  for (int dev = 0; dev < Caffe::solver_count(); ++dev) {
    Caffe::set_solver_rank(dev);

    HDF5DataLayer<Dtype> layer(param);
    layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
    int label = dev;
    for (int iter = 0; iter < 1; ++iter) {
      layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
      for (int i = 0; i < batch_size; ++i) {
        EXPECT_EQ(1 + label, this->blob_top_label_->cpu_data()[i]);
        label = (label + Caffe::solver_count()) % (batch_size * 2);
      }
    }
  }
  Caffe::set_solver_count(1);
  Caffe::set_solver_rank(0);
}
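Note on the skip arithmetic above: with Caffe::solver_count() == 8, rank dev consumes rows dev, dev + 8, dev + 16, ... of the file, so the expected label advances in strides of solver_count() modulo the row count (batch_size * 2 in this fixture); the 1 + label offset presumably matches how the fixture populates the HDF5 labels.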
Example #4
template <typename Dtype>
shared_ptr<Layer<Dtype> > GetTanHLayer(const LayerParameter& param) {
  TanHParameter_Engine engine = param.tanh_param().engine();

  // New, more flexible way of providing engine
  if (engine == TanHParameter_Engine_DEFAULT && param.engine() != "") {
    EngineParser ep(param.engine());
    if (ep.isEngine("CAFFE"))
      engine = TanHParameter_Engine_CAFFE;
#ifdef USE_CUDNN
    if (ep.isEngine("CUDNN"))
      engine = TanHParameter_Engine_CUDNN;
#endif
  }

  if (engine == TanHParameter_Engine_DEFAULT) {
    engine = TanHParameter_Engine_CAFFE;
#ifdef USE_CUDNN
    engine = TanHParameter_Engine_CUDNN;
#endif
  }
  if (engine == TanHParameter_Engine_CAFFE) {
    return shared_ptr<Layer<Dtype> >(new TanHLayer<Dtype>(param));
#ifdef USE_CUDNN
  } else if (engine == TanHParameter_Engine_CUDNN) {
    return shared_ptr<Layer<Dtype> >(new CuDNNTanHLayer<Dtype>(param));
#endif
  } else {
    LOG(FATAL) << "Layer " << param.name() << " has unknown engine.";
  }
  return shared_ptr<Layer<Dtype> >();
}
Example #5
template <typename Dtype>
shared_ptr<Layer<Dtype> > GetLRNLayer(const LayerParameter& param) {
  LRNParameter_Engine engine = param.lrn_param().engine();

  if (engine == LRNParameter_Engine_DEFAULT) {
    engine = LRNParameter_Engine_CAFFE;
#ifdef USE_CUDNN
    engine = LRNParameter_Engine_CUDNN;
#endif
  }

  if (engine == LRNParameter_Engine_CAFFE
      || Caffe::GetDevice(param.device(), true)->backend() == BACKEND_OpenCL) {
    return shared_ptr<Layer<Dtype> >(new LRNLayer<Dtype>(param));
#ifdef USE_CUDNN
  } else if (engine == LRNParameter_Engine_CUDNN) {
    LRNParameter lrn_param = param.lrn_param();

    if (lrn_param.norm_region() == LRNParameter_NormRegion_WITHIN_CHANNEL) {
      return shared_ptr<Layer<Dtype> >(new CuDNNLCNLayer<Dtype>(param));
    } else {
      // local size is too big to be handled through cuDNN
      if (param.lrn_param().local_size() > CUDNN_LRN_MAX_N) {
        return shared_ptr<Layer<Dtype> >(new LRNLayer<Dtype>(param));
      } else {
        return shared_ptr<Layer<Dtype> >(new CuDNNLRNLayer<Dtype>(param));
      }
    }
#endif
  } else {
    LOG(FATAL) << "Layer " << param.name() << " has unknown engine.";
  }
  return shared_ptr<Layer<Dtype> >();  // unreachable; silences the compiler
}
Example #6
bool BaseProducer::is_valid(const LayerParameter& param) {
  if (!param.has_type()) return false;
  switch (param.type()) {
  case LayerParameter_LayerType_DATA: return true;
  default: return false;
  }
  return false;
}
Example #7
 // Get a layer using a LayerParameter.
 static shared_ptr<Layer<Dtype> > CreateLayer(const LayerParameter& param) {
   LOG(INFO) << "Creating layer " << param.name();
   const string& type = param.type();
   CreatorRegistry& registry = Registry();
   CHECK_EQ(registry.count(type), 1) << "Unknown layer type: " << type
       << " (known types: " << LayerTypeList() << ")";
   return registry[type](param);
 }
Example #8
template <typename Dtype>
AnnotatedDataLayer<Dtype>::AnnotatedDataLayer(const LayerParameter& param)
    : BasePrefetchingDataLayer<Dtype>(param),
      offset_() {
  db_.reset(db::GetDB(param.data_param().backend()));
  db_->Open(param.data_param().source(), db::READ);
  cursor_.reset(db_->NewCursor());
}
Example #9
template <typename Dtype>
TransformingFastHDF5InputLayer<Dtype>::TransformingFastHDF5InputLayer(
    const LayerParameter& param)
    : Layer<Dtype>(param) {
  // Set the batch size to 1 before we create the wrapped layers
  LayerParameter p = param;
  p.mutable_fast_hdf5_input_param()->set_batch_size(1);
  input_layer_.reset(new FastHDF5InputLayer<Dtype>(p));
  transformation_layer_.reset(new TransformationLayer<Dtype>(p));
}
Example #10
template <typename Ftype, typename Btype>
size_t BasePrefetchingDataLayer<Ftype, Btype>::parser_threads(
    const LayerParameter& param) {
  // Check user's override in prototxt file
  size_t parser_threads = param.data_param().parser_threads();
  if (!auto_mode(param) && parser_threads == 0U) {
    parser_threads = 1U;  // guard against an invalid 0 in the prototxt
  }
  // Use a single thread for the test net.
  return (auto_mode(param) || param.phase() == TEST || parser_threads == 0U)
      ? 1U : parser_threads;
}
Example #11
  void TestReadCropTrainSequenceUnseeded() {
    LayerParameter param;
    param.set_phase(TRAIN);
    DataParameter* data_param = param.mutable_data_param();
    data_param->set_batch_size(5);
    data_param->set_source(filename_->c_str());
    data_param->set_backend(backend_);

    TransformationParameter* transform_param =
        param.mutable_transform_param();
    transform_param->set_crop_size(1);
    transform_param->set_mirror(true);

    // Get crop sequence with Caffe seed 1701, srand seed 1701.
    Caffe::set_random_seed(seed_);
    srand(seed_);
    vector<vector<Dtype> > crop_sequence;
    {
      DataLayer<Dtype> layer1(param);
      layer1.SetUp(blob_bottom_vec_, blob_top_vec_);
      for (int iter = 0; iter < 2; ++iter) {
        layer1.Forward(blob_bottom_vec_, blob_top_vec_);
        for (int i = 0; i < 5; ++i) {
          EXPECT_EQ(i, blob_top_label_->cpu_data()[i]);
        }
        vector<Dtype> iter_crop_sequence;
        for (int i = 0; i < 5; ++i) {
          for (int j = 0; j < 2; ++j) {
            iter_crop_sequence.push_back(
                blob_top_data_->cpu_data()[i * 2 + j]);
          }
        }
        crop_sequence.push_back(iter_crop_sequence);
      }
    }  // destroy 1st data layer and unlock the db

    // Get crop sequence continuing from previous Caffe RNG state; reseed
    // srand with 1701. Check that the sequence differs from the original.
    srand(seed_);
    DataLayer<Dtype> layer2(param);
    layer2.SetUp(blob_bottom_vec_, blob_top_vec_);
    for (int iter = 0; iter < 2; ++iter) {
      layer2.Forward(blob_bottom_vec_, blob_top_vec_);
      for (int i = 0; i < 5; ++i) {
        EXPECT_EQ(i, blob_top_label_->cpu_data()[i]);
      }
      int num_sequence_matches = 0;
      for (int i = 0; i < 5; ++i) {
        for (int j = 0; j < 2; ++j) {
          num_sequence_matches += (crop_sequence[iter][i * 2 + j] ==
                                   blob_top_data_->cpu_data()[i * 2 + j]);
        }
      }
      EXPECT_LT(num_sequence_matches, 10);
    }
  }
Example #12
template <typename Dtype>
shared_ptr<Layer<Dtype> > GetBNLayer(const LayerParameter& param) {
  BNParameter_Type BN_type = param.bn_param().bn_type();
  if (BN_type == BNParameter_Type_CHANNEL_WISE) {
    return shared_ptr<Layer<Dtype> >(new ChannlWiseBNLayer<Dtype>(param));
  } else if (BN_type == BNParameter_Type_ELEMENT_WISE) {
    return shared_ptr<Layer<Dtype> >(new EltWiseBNLayer<Dtype>(param));
  } else {
    LOG(FATAL) << "Layer " << param.name() << " has unknown type.";
  }
  return shared_ptr<Layer<Dtype> >();  // unreachable; silences the compiler
}
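A hedged configuration sketch (assuming this fork's BNParameter exposes the bn_type enum the factory reads above):

LayerParameter param;
param.set_name("bn1");
param.mutable_bn_param()->set_bn_type(BNParameter_Type_CHANNEL_WISE);
shared_ptr<Layer<float> > bn = GetBNLayer<float>(param);  // ChannlWiseBNLayer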
Example #13
  void TestRead2() {
    std::cerr << "\ntestRead2\n";
    LayerParameter param;
    DataParameter* data_param =
        param.mutable_data_param();
    // halve the previous batch size to alternate between the two datasets
    data_param->set_batch_size(3);
    data_param->set_backend(backend_);
    data_param->set_source(filename_->c_str());
    SparseDataLayer<Dtype> layer(param);
    layer.SetUp(blob_bottom_vec_, blob_top_vec_);
    EXPECT_EQ(blob_top_data_->num(), 3);
    EXPECT_EQ(blob_top_data_->channels(), 6);
    EXPECT_EQ(blob_top_data_->height(), 1);
    EXPECT_EQ(blob_top_data_->width(), 1);
    EXPECT_EQ(blob_top_label_->num(), 3);
    EXPECT_EQ(blob_top_label_->channels(), 1);
    EXPECT_EQ(blob_top_label_->height(), 1);
    EXPECT_EQ(blob_top_label_->width(), 1);

    int delta = 0;
    for (int iter = 0; iter < 100; ++iter) {
      layer.Forward(blob_bottom_vec_, blob_top_vec_);
      if (iter % 2) {
        delta = 3;
      } else {
        delta = 0;
      }
      for (int i = 0; i < 3; ++i) {
        EXPECT_EQ(i + delta, blob_top_label_->cpu_data()[i]);
      }

      EXPECT_EQ(0, blob_top_data_->cpu_ptr()[0]);
      if (delta == 0) {
        EXPECT_EQ(1, blob_top_data_->cpu_ptr()[1]);
        EXPECT_EQ(3, blob_top_data_->cpu_ptr()[2]);
        EXPECT_EQ(6, blob_top_data_->cpu_ptr()[3]);
      } else {
        EXPECT_EQ(4, blob_top_data_->cpu_ptr()[1]);
        EXPECT_EQ(9, blob_top_data_->cpu_ptr()[2]);
        EXPECT_EQ(15, blob_top_data_->cpu_ptr()[3]);
      }
      for (int i = 0; i < 3; ++i) {
        for (int j = 0; j < i + delta; ++j) {
          EXPECT_EQ(j+1,
              blob_top_data_->cpu_data()[blob_top_data_->cpu_ptr()[i]+j])
              << "debug data: iter " << iter << " i " << i << " j " << j;
          EXPECT_EQ(j,
              blob_top_data_->cpu_indices()[blob_top_data_->cpu_ptr()[i]+j])
              << "debug indices: iter " << iter << " i " << i << " j " << j;
        }
      }
    }
  }
Example #14
  void TestReadCropTrainSequenceSeeded() {
    LayerParameter param;
    param.set_phase(TRAIN);
    DataParameter* data_param = param.mutable_data_param();
    data_param->set_batch_size(5);
    data_param->set_source(filename_->c_str());
    data_param->set_backend(backend_);

    TransformationParameter* transform_param =
        param.mutable_transform_param();
    transform_param->set_crop_size(1);
    transform_param->set_mirror(true);

    // Get crop sequence with Caffe seed 1701.
    Caffe::set_random_seed(seed_);
    vector<vector<Dtype> > crop_sequence;
    {
      DataLayer<Dtype> layer1(param);
      layer1.SetUp(blob_bottom_vec_, blob_top_vec_);
      for (int iter = 0; iter < 2; ++iter) {
        layer1.Forward(blob_bottom_vec_, blob_top_vec_);
        for (int i = 0; i < 5; ++i) {
          EXPECT_EQ(i, blob_top_label_->cpu_data()[i]);
        }
        vector<Dtype> iter_crop_sequence;
        for (int i = 0; i < 5; ++i) {
          for (int j = 0; j < 2; ++j) {
            iter_crop_sequence.push_back(
                blob_top_data_->cpu_data()[i * 2 + j]);
          }
        }
        crop_sequence.push_back(iter_crop_sequence);
      }
    }  // destroy 1st data layer and unlock the db

    // Get crop sequence after reseeding Caffe with 1701.
    // Check that the sequence is the same as the original.
    Caffe::set_random_seed(seed_);
    DataLayer<Dtype> layer2(param);
    layer2.SetUp(blob_bottom_vec_, blob_top_vec_);
    for (int iter = 0; iter < 2; ++iter) {
      layer2.Forward(blob_bottom_vec_, blob_top_vec_);
      for (int i = 0; i < 5; ++i) {
        EXPECT_EQ(i, blob_top_label_->cpu_data()[i]);
      }
      for (int i = 0; i < 5; ++i) {
        for (int j = 0; j < 2; ++j) {
          EXPECT_EQ(crop_sequence[iter][i * 2 + j],
                    blob_top_data_->cpu_data()[i * 2 + j])
              << "debug: iter " << iter << " i " << i << " j " << j;
        }
      }
    }
  }
Example #15
template <typename Dtype>
shared_ptr<Layer<Dtype> > GetInvPoolingLayer(const LayerParameter& param) {
  PoolingParameter_Engine engine = param.pooling_param().engine();
  if (engine == PoolingParameter_Engine_DEFAULT) {
    engine = PoolingParameter_Engine_CAFFE;
  }
  if (engine == PoolingParameter_Engine_CAFFE) {
    return shared_ptr<Layer<Dtype> >(new InvPoolingLayer<Dtype>(param));
  } else {
    LOG(FATAL) << "Layer " << param.name() << " has unknown engine.";
  }
  return shared_ptr<Layer<Dtype> >();  // unreachable; silences the compiler
}
Example #16
template <typename Dtype>
shared_ptr<Layer<Dtype> > GetPythonLayer(const LayerParameter& param) {
  Py_Initialize();
  try {
    bp::object module = bp::import(param.python_param().module().c_str());
    bp::object layer = module.attr(param.python_param().layer().c_str())(param);
    return bp::extract<shared_ptr<PythonLayer<Dtype> > >(layer)();
  } catch (bp::error_already_set&) {
    PyErr_Print();
    throw;
  }
}
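A caller sketch, assuming a build with the Python layer enabled and a my_module.py defining class MyLayer on the PYTHONPATH (both names hypothetical):

LayerParameter param;
param.set_name("py1");
param.set_type("Python");
param.mutable_python_param()->set_module("my_module");  // hypothetical module
param.mutable_python_param()->set_layer("MyLayer");     // hypothetical class
shared_ptr<Layer<float> > layer = GetPythonLayer<float>(param);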
Example #17
  void TestReadCrop(Phase phase) {
    const Dtype scale = 3;
    LayerParameter param;
    param.set_phase(phase);
    Caffe::set_random_seed(1701);

    DataParameter* data_param = param.mutable_data_param();
    data_param->set_batch_size(5);
    data_param->set_source(filename_->c_str());
    data_param->set_backend(backend_);

    TransformationParameter* transform_param =
        param.mutable_transform_param();
    transform_param->set_scale(scale);
    transform_param->set_crop_size(1);

    DataLayer<Dtype> layer(param);
    layer.SetUp(blob_bottom_vec_, blob_top_vec_);
    EXPECT_EQ(blob_top_data_->num(), 5);
    EXPECT_EQ(blob_top_data_->channels(), 2);
    EXPECT_EQ(blob_top_data_->height(), 1);
    EXPECT_EQ(blob_top_data_->width(), 1);
    EXPECT_EQ(blob_top_label_->num(), 5);
    EXPECT_EQ(blob_top_label_->channels(), 1);
    EXPECT_EQ(blob_top_label_->height(), 1);
    EXPECT_EQ(blob_top_label_->width(), 1);

    for (int iter = 0; iter < 2; ++iter) {
      layer.Forward(blob_bottom_vec_, blob_top_vec_);
      for (int i = 0; i < 5; ++i) {
        EXPECT_EQ(i, blob_top_label_->cpu_data()[i]);
      }
      int num_with_center_value = 0;
      for (int i = 0; i < 5; ++i) {
        for (int j = 0; j < 2; ++j) {
          const Dtype center_value = scale * (j ? 17 : 5);
          num_with_center_value +=
              (center_value == blob_top_data_->cpu_data()[i * 2 + j]);
          // At TEST time, check that we always get the center value.
          if (phase == caffe::TEST) {
            EXPECT_EQ(center_value, this->blob_top_data_->cpu_data()[i * 2 + j])
                << "debug: iter " << iter << " i " << i << " j " << j;
          }
        }
      }
      // At TRAIN time, check that we did not get the center crop all 10 times.
      // (In a correct implementation this check fails only with probability
      // (1/12)^10, and set_random_seed makes the outcome deterministic.)
      if (phase == caffe::TRAIN) {
        EXPECT_LT(num_with_center_value, 10);
      }
    }
  }
Example #18
TYPED_TEST(MemoryDataLayerTest, AddDatumVectorDefaultTransform) {
  typedef typename TypeParam::Dtype Dtype;

  LayerParameter param;
  MemoryDataParameter* memory_data_param = param.mutable_memory_data_param();
  memory_data_param->set_batch_size(this->batch_size_);
  memory_data_param->set_channels(this->channels_);
  memory_data_param->set_height(this->height_);
  memory_data_param->set_width(this->width_);
  MemoryDataLayer<Dtype> layer(param);
  layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
  // We add batch_size * num_iter items; each Forward pass then
  // consumes batch_size of them in order.
  int num_iter = 5;
  vector<Datum> datum_vector(this->batch_size_ * num_iter);
  const size_t count = this->channels_ * this->height_ * this->width_;
  size_t pixel_index = 0;
  for (int i = 0; i < this->batch_size_ * num_iter; ++i) {
    datum_vector[i].set_channels(this->channels_);
    datum_vector[i].set_height(this->height_);
    datum_vector[i].set_width(this->width_);
    datum_vector[i].set_label(i);
    vector<char> pixels(count);
    for (int j = 0; j < count; ++j) {
      pixels[j] = pixel_index++ % 256;
    }
    datum_vector[i].set_data(&(pixels[0]), count);
  }
  layer.AddDatumVector(datum_vector);

  int data_index;
  // Go through the data 5 times
  for (int iter = 0; iter < num_iter; ++iter) {
    int offset = this->batch_size_ * iter;
    layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
    const Dtype* data = this->data_blob_->cpu_data();
    size_t index = 0;
    for (int i = 0; i < this->batch_size_; ++i) {
      const string& data_string = datum_vector[offset + i].data();
      EXPECT_EQ(offset + i, this->label_blob_->cpu_data()[i]);
      for (int c = 0; c < this->channels_; ++c) {
        for (int h = 0; h < this->height_; ++h) {
          for (int w = 0; w < this->width_; ++w) {
            data_index = (c * this->height_ + h) * this->width_ + w;
            EXPECT_EQ(static_cast<Dtype>(
                static_cast<uint8_t>(data_string[data_index])),
                      data[index++]);
          }
        }
      }
    }
  }
}
Example #19
TEST_F(ProtoTest, TestSerialization) {
  LayerParameter param;
  param.set_name("test");
  param.set_type("dummy");
  std::cout << "Printing in binary format." << std::endl;
  std::cout << param.SerializeAsString() << std::endl;
  std::cout << "Printing in text format." << std::endl;
  std::string str;
  google::protobuf::TextFormat::PrintToString(param, &str);
  std::cout << str << std::endl;
  EXPECT_TRUE(true);
}
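A small round-trip sketch using the same protobuf TextFormat API, to show the printed text can be parsed back (continuing from the str above):

LayerParameter parsed;
ASSERT_TRUE(google::protobuf::TextFormat::ParseFromString(str, &parsed));
EXPECT_EQ("test", parsed.name());
EXPECT_EQ("dummy", parsed.type());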
Example #20
boost::shared_ptr<Producer> BaseProducer::get_producer(
    const LayerParameter& param) {
  CHECK(BaseProducer::is_valid(param)) << "Cannot generate data producer \""
                                       << param.name() << "\".";
  switch (param.type()) {
  case LayerParameter_LayerType_DATA:
    return boost::shared_ptr<Producer>(new DataProducer(param));
  default:
    LOG(FATAL) << "Cannot generate data producer \"" << param.name()
               << "\".";
  }
  return boost::shared_ptr<Producer>();  // dummy return; never reached
}
Example #21
DataReader::DataReader(const LayerParameter& param)
    : queue_pair_(new QueuePair(  //
        param.data_param().prefetch() * param.data_param().batch_size())) {
  // Get or create a body
  boost::mutex::scoped_lock lock(bodies_mutex_);
  string key = source_key(param);
  weak_ptr<Body>& weak = bodies_[key];
  body_ = weak.lock();
  if (!body_) {
    body_.reset(new Body(param));
    bodies_[key] = weak_ptr<Body>(body_);
  }
  body_->new_queue_pairs_.push(queue_pair_);
}
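One consequence of the body map above, as a hedged usage note: readers built from LayerParameters that map to the same source key share a single reading Body, while each keeps its own QueuePair.

DataReader reader_a(param);  // creates the Body for this source
DataReader reader_b(param);  // same key: attaches to the existing Body
// Each reader pops datums from its own queue_pair_, fed by the one
// prefetch thread running inside the shared Body.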
Example #22
TYPED_TEST(HDF5OutputLayerTest, TestForward) {
  typedef typename TypeParam::Dtype Dtype;
  LOG(INFO) << "Loading HDF5 file " << this->input_file_name_;
  hid_t file_id = H5Fopen(this->input_file_name_.c_str(), H5F_ACC_RDONLY,
                          H5P_DEFAULT);
  ASSERT_GE(file_id, 0) << "Failed to open HDF5 file "
                        << this->input_file_name_;
  // Allow reshape here as we are loading data not params
  bool reshape = true;
  hdf5_load_nd_dataset(file_id, HDF5_DATA_DATASET_NAME, 0, 5,
                       this->blob_data_, reshape);
  hdf5_load_nd_dataset(file_id, HDF5_DATA_LABEL_NAME, 0, 5,
                       this->blob_label_, reshape);
  herr_t status = H5Fclose(file_id);
  EXPECT_GE(status, 0) << "Failed to close HDF5 file "
                       << this->input_file_name_;
  this->blob_bottom_vec_.push_back(this->blob_data_);
  this->blob_bottom_vec_.push_back(this->blob_label_);

  LayerParameter param;
  param.mutable_hdf5_output_param()->set_file_name(this->output_file_name_);
  // This code block ensures that the layer is deconstructed and
  //   the output hdf5 file is closed.
  {
    HDF5OutputLayer<Dtype> layer(param);
    layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
    EXPECT_EQ(layer.file_name(), this->output_file_name_);
    layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
  }
  file_id = H5Fopen(this->output_file_name_.c_str(), H5F_ACC_RDONLY,
                    H5P_DEFAULT);
  ASSERT_GE(file_id, 0) << "Failed to open HDF5 file "
                        << this->output_file_name_;

  Blob<Dtype>* blob_data = new Blob<Dtype>();
  hdf5_load_nd_dataset(file_id, HDF5_DATA_DATASET_NAME, 0, 5,
                       blob_data, reshape);
  this->CheckBlobEqual(*(this->blob_data_), *blob_data);

  Blob<Dtype>* blob_label = new Blob<Dtype>();
  hdf5_load_nd_dataset(file_id, HDF5_DATA_LABEL_NAME, 0, 5,
                       blob_label, reshape);
  this->CheckBlobEqual(*(this->blob_label_), *blob_label);

  status = H5Fclose(file_id);
  EXPECT_GE(status, 0) << "Failed to close HDF5 file " <<
      this->output_file_name_;
}
Example #23
TYPED_TEST(HDF5OutputLayerTest, TestForward) {
  LOG(INFO) << "Loading HDF5 file " << this->input_file_name_;
  hid_t file_id = H5Fopen(this->input_file_name_.c_str(), H5F_ACC_RDONLY,
                          H5P_DEFAULT);
  ASSERT_GE(file_id, 0) << "Failed to open HDF5 file "
                        << this->input_file_name_;
  hdf5_load_nd_dataset(file_id, HDF5_DATA_DATASET_NAME, 0, 4,
                       this->blob_data_);
  hdf5_load_nd_dataset(file_id, HDF5_DATA_LABEL_NAME, 0, 4,
                       this->blob_label_);
  herr_t status = H5Fclose(file_id);
  EXPECT_GE(status, 0) << "Failed to close HDF5 file " <<
      this->input_file_name_;
  this->blob_bottom_vec_.push_back(this->blob_data_);
  this->blob_bottom_vec_.push_back(this->blob_label_);

  Caffe::Brew modes[] = { Caffe::CPU, Caffe::GPU };
  for (int m = 0; m < 2; ++m) {
    Caffe::set_mode(modes[m]);
    LayerParameter param;
    param.mutable_hdf5_output_param()->set_file_name(this->output_file_name_);
    // This code block ensures that the layer is deconstructed and
    //   the output hdf5 file is closed.
    {
      HDF5OutputLayer<TypeParam> layer(param);
      EXPECT_EQ(layer.file_name(), this->output_file_name_);
      layer.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_);
      layer.Forward(this->blob_bottom_vec_, &this->blob_top_vec_);
    }
    hid_t file_id = H5Fopen(this->output_file_name_.c_str(), H5F_ACC_RDONLY,
                            H5P_DEFAULT);
    ASSERT_GE(file_id, 0) << "Failed to open HDF5 file "
                          << this->output_file_name_;

    Blob<TypeParam>* blob_data = new Blob<TypeParam>();
    hdf5_load_nd_dataset(file_id, HDF5_DATA_DATASET_NAME, 0, 4,
                         blob_data);
    this->CheckBlobEqual(*(this->blob_data_), *blob_data);

    Blob<TypeParam>* blob_label = new Blob<TypeParam>();
    hdf5_load_nd_dataset(file_id, HDF5_DATA_LABEL_NAME, 0, 4,
                         blob_label);
    this->CheckBlobEqual(*(this->blob_label_), *blob_label);

    herr_t status = H5Fclose(file_id);
    EXPECT_GE(status, 0) << "Failed to close HDF5 file " <<
        this->output_file_name_;
  }
}
Example #24
 /**
  * Called by SetUp to initialize the weights associated with any top blobs in
  * the loss function. Store non-zero loss weights in the diff blob.
  */
 inline void SetLossWeights(const vector<Blob<Dtype>*>& top) {
   const int num_loss_weights = layer_param_.loss_weight_size();
   if (num_loss_weights) {
     CHECK_EQ(top.size(), num_loss_weights) << "loss_weight must be "
         "unspecified or specified once per top blob.";
     for (int top_id = 0; top_id < top.size(); ++top_id) {
       const Dtype loss_weight = layer_param_.loss_weight(top_id);
       if (loss_weight == Dtype(0)) { continue; }
       this->set_loss(top_id, loss_weight);
       const int count = top[top_id]->count();
       Dtype* loss_multiplier = top[top_id]->mutable_cpu_diff();
       caffe_set(count, loss_weight, loss_multiplier);
     }
   }
 }
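A sketch of driving this path from the proto side (stock LayerParameter, where loss_weight is a repeated float aligned one-to-one with the top blobs):

LayerParameter param;
param.set_name("loss");
param.set_type("SoftmaxWithLoss");
param.add_loss_weight(1.0f);  // one entry per top blob, or none at all
// During SetUp, SetLossWeights copies 1.0 into the top blob's diff,
// marking that top as a loss output with unit weight.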
Example #25
template <typename Dtype>
shared_ptr<Layer<Dtype> > GetReLULayer(const LayerParameter& param) {
  ReLUParameter_Engine engine = param.relu_param().engine();

  // New, more flexible way of providing engine
  if (engine == ReLUParameter_Engine_DEFAULT && param.engine() != "") {
    EngineParser ep(param.engine());

    if (ep.isEngine("CAFFE"))
      engine = ReLUParameter_Engine_CAFFE;
#ifdef USE_CUDNN
    else if (ep.isEngine("CUDNN"))
      engine = ReLUParameter_Engine_CUDNN;
#endif
#if defined(MKL2017_SUPPORTED)
    else if (ep.isEngine("MKL2017"))
      engine = ReLUParameter_Engine_MKL2017;
#endif
#if defined(MKLDNN_SUPPORTED)
    else if (ep.isEngine("MKLDNN"))
      engine = ReLUParameter_Engine_MKLDNN;
#endif
  }

  if (engine == ReLUParameter_Engine_DEFAULT) {
    engine = ReLUParameter_Engine_CAFFE;
#ifdef USE_CUDNN
    engine = ReLUParameter_Engine_CUDNN;
#endif
  }
  if (engine == ReLUParameter_Engine_CAFFE) {
    return shared_ptr<Layer<Dtype> >(new ReLULayer<Dtype>(param));
#ifdef USE_CUDNN
  } else if (engine == ReLUParameter_Engine_CUDNN) {
    return shared_ptr<Layer<Dtype> >(new CuDNNReLULayer<Dtype>(param));
#endif
#ifdef MKL2017_SUPPORTED
  } else if (engine == ReLUParameter_Engine_MKL2017) {
    return shared_ptr<Layer<Dtype> >(new MKLReLULayer<Dtype>(param));
#endif
#ifdef MKLDNN_SUPPORTED
  } else if (engine == ReLUParameter_Engine_MKLDNN) {
    return shared_ptr<Layer<Dtype> >(new MKLDNNReLULayer<Dtype>(param));
#endif
  } else {
    LOG(FATAL) << "Layer " << param.name() << " has unknown engine.";
  }
  return shared_ptr<Layer<Dtype> >();
}
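A hedged sketch of the string-based selection path (assuming the Intel-fork LayerParameter that carries the free-form engine string parsed by EngineParser above):

LayerParameter param;
param.set_name("relu1");
param.set_engine("MKLDNN");  // overrides relu_param().engine() when DEFAULT
shared_ptr<Layer<float> > relu = GetReLULayer<float>(param);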
Example #26
template <typename Dtype>
void LRNLayerTest<Dtype>::ReferenceLRNForward(
    const Blob<Dtype>& blob_bottom, const LayerParameter& layer_param,
    Blob<Dtype>* blob_top) {
  blob_top->Reshape(blob_bottom.num(), blob_bottom.channels(),
      blob_bottom.height(), blob_bottom.width());
  Dtype* top_data = blob_top->mutable_cpu_data();
  LRNParameter lrn_param = layer_param.lrn_param();
  Dtype alpha = lrn_param.alpha();
  Dtype beta = lrn_param.beta();
  int size = lrn_param.local_size();
  switch (lrn_param.norm_region()) {
  case LRNParameter_NormRegion_ACROSS_CHANNELS:
    for (int n = 0; n < blob_bottom.num(); ++n) {
      for (int c = 0; c < blob_bottom.channels(); ++c) {
        for (int h = 0; h < blob_bottom.height(); ++h) {
          for (int w = 0; w < blob_bottom.width(); ++w) {
            int c_start = c - (size - 1) / 2;
            int c_end = min(c_start + size, blob_bottom.channels());
            c_start = max(c_start, 0);
            Dtype scale = 1.;
            for (int i = c_start; i < c_end; ++i) {
              Dtype value = blob_bottom.data_at(n, i, h, w);
              scale += value * value * alpha / size;
            }
            *(top_data + blob_top->offset(n, c, h, w)) =
              blob_bottom.data_at(n, c, h, w) / pow(scale, beta);
          }
        }
      }
    }
    break;
  case LRNParameter_NormRegion_WITHIN_CHANNEL:
    for (int n = 0; n < blob_bottom.num(); ++n) {
      for (int c = 0; c < blob_bottom.channels(); ++c) {
        for (int h = 0; h < blob_bottom.height(); ++h) {
          int h_start = h - (size - 1) / 2;
          int h_end = min(h_start + size, blob_bottom.height());
          h_start = max(h_start, 0);
          for (int w = 0; w < blob_bottom.width(); ++w) {
            Dtype scale = 1.;
            int w_start = w - (size - 1) / 2;
            int w_end = min(w_start + size, blob_bottom.width());
            w_start = max(w_start, 0);
            for (int nh = h_start; nh < h_end; ++nh) {
              for (int nw = w_start; nw < w_end; ++nw) {
                Dtype value = blob_bottom.data_at(n, c, nh, nw);
                scale += value * value * alpha / (size * size);
              }
            }
            *(top_data + blob_top->offset(n, c, h, w)) =
              blob_bottom.data_at(n, c, h, w) / pow(scale, beta);
          }
        }
      }
    }
    break;
  default:
    LOG(FATAL) << "Unknown normalization region.";
  }
}
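In formula terms, the reference computes top = bottom / (1 + (alpha / n) * sum_i x_i^2)^beta, where n is local_size and the sum runs over the clipped channel window in the ACROSS_CHANNELS case, or over the local_size-by-local_size spatial window with n^2 as the divisor in the WITHIN_CHANNEL case.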
Example #27
template <typename Dtype>
WarpCTCLossLayer<Dtype>::WarpCTCLossLayer(const LayerParameter& param)
    : LossLayer<Dtype>(param),
      T_(0),
      N_(0),
      C_(0) {
  blank_index_ = param.ctc_loss_param().blank_index();
}
Example #28
template <typename Dtype>
BasePrefetchingDataLayer<Dtype>::BasePrefetchingDataLayer(
    const LayerParameter& param)
    : BaseDataLayer<Dtype>(param),
      untransformed_top_(false), prefetch_free_(), prefetch_full_(),
      prefetch_free_untransformed_(), prefetch_full_untransformed_() {
  if (param.transform_param().has_untransformed_top() &&
      param.transform_param().untransformed_top())
    untransformed_top_ = true;

  for (int i = 0; i < PREFETCH_COUNT; ++i) {
    prefetch_free_.push(&prefetch_[i]);
    if (untransformed_top_)
      prefetch_free_untransformed_.push(&prefetch_untransformed_[i]);
  }
}
Example #29
template <typename Dtype>
shared_ptr<Layer<Dtype> > GetDeconvolutionLayer(const LayerParameter& param) {
  ConvolutionParameter conv_param = param.convolution_param();
  ConvolutionParameter_Engine engine = conv_param.engine();

#if defined(MKL2017_SUPPORTED)
  bool use_dilation = false;
  for (int i = 0; i < conv_param.dilation_size(); ++i) {
    if (conv_param.dilation(i) > 1) {
      use_dilation = true;
    }
  }
#endif

  // New, more flexible way of providing engine
  if (engine == ConvolutionParameter_Engine_DEFAULT && param.engine() != "") {
    EngineParser ep(param.engine());

    if (ep.isEngine("CAFFE")) {
      engine = ConvolutionParameter_Engine_CAFFE;
    }
#ifdef MKL2017_SUPPORTED
    else if (!use_dilation && ep.isEngine("MKL2017")) {
      engine = ConvolutionParameter_Engine_MKL2017;
    }
#endif

  }

  if (engine == ConvolutionParameter_Engine_DEFAULT) {
    engine = ConvolutionParameter_Engine_CAFFE;
  }
  if (engine == ConvolutionParameter_Engine_CAFFE) {
    return shared_ptr<Layer<Dtype> >(new DeconvolutionLayer<Dtype>(param));
#ifdef MKL2017_SUPPORTED
  } else if (engine == ConvolutionParameter_Engine_MKL2017) {
    if (use_dilation) {
      LOG(FATAL) << "MKL2017 doesn't support the dilated convolution at Layer "
                 << param.name();
    }
    return shared_ptr<Layer<Dtype> >(new MKLDeconvolutionLayer<Dtype>(param));
#endif
  } else {
    LOG(FATAL) << "Layer " << param.name() << " has unknown engine.";
  }
  return shared_ptr<Layer<Dtype> >();
}
Example #30
template <typename Dtype>
HDF5OutputLayer<Dtype>::HDF5OutputLayer(const LayerParameter& param)
    : Layer<Dtype>(param),
      file_name_(param.hdf5_output_param().file_name()) {
  // Create the HDF5 file, truncating any existing file with this name.
  file_id_ = H5Fcreate(file_name_.c_str(), H5F_ACC_TRUNC, H5P_DEFAULT,
                       H5P_DEFAULT);
  CHECK_GE(file_id_, 0) << "Failed to create HDF5 file " << file_name_;
}