Code example #1
  bool ConvolutionLayer::init() {
    if (initialized) return true;
    if ((layer_param_.blobs_size() < 1) ||
        (layer_param_.convolution_param().bias_term() &&
         layer_param_.blobs_size() < 2)) return false;

    LOG(INFO) << "Starting to initialize " << this->name() << endl;

    const BlobProto& weights_blob = layer_param_.blobs(0);
    LOG(INFO) << "CONV WEIGHTS: " << endl << weights_blob.width() << endl
              << weights_blob.height() << endl << weights_blob.channels() << endl
              << weights_blob.num() << endl;

    weights_host_ = new float[weights_blob.width() * weights_blob.height() *
                              weights_blob.channels() * weights_blob.num()];
    Buffer weights_buf(type_of<float>(), weights_blob.width(), weights_blob.height(),
                       weights_blob.channels(), weights_blob.num(),
                       (uint8_t*) weights_host_);
    // The computed index equals the flat (num, channels, height, width)
    // offset used by the proto, so this copies every element to the same
    // position it came from.
    for (int x = 0, x_end = weights_blob.width(); x < x_end; x++) {
      for (int y = 0, y_end = weights_blob.height(); y < y_end; y++) {
        for (int c = 0, c_end = weights_blob.channels(); c < c_end; c++) {
          for (int n = 0, n_end = weights_blob.num(); n < n_end; n++) {
            int index = x + y * weights_blob.width() +
                        c * weights_blob.width() * weights_blob.height() +
                        n * weights_blob.width() * weights_blob.height() *
                            weights_blob.channels();
            weights_host_[index] = weights_blob.data(index);
          }
        }
      }
    }

    if (!weights_.defined())
      weights_ = ImageParam(weights_buf.type(), 4);
    weights_.set(weights_buf);

    if (layer_param_.convolution_param().bias_term()) {
      // Bind by const reference; the original declared a by-value copy
      // of the whole BlobProto.
      const BlobProto& bias_blob = layer_param_.blobs(1);
      bias_host_ = new float[bias_blob.width() * bias_blob.height() *
                             bias_blob.channels() * bias_blob.num()];
      Buffer bias_buf(type_of<float>(), bias_blob.width(), bias_blob.height(),
                      bias_blob.channels(), bias_blob.num(), (uint8_t*) bias_host_);

      LOG(INFO) << "CONV BIAS" << endl << bias_blob.width() << endl
                << bias_blob.height() << endl << bias_blob.channels() << endl
                << bias_blob.num() << endl;
      int bias_size = bias_blob.width() * bias_blob.height() *
                      bias_blob.channels() * bias_blob.num();
      for (int i = 0; i < bias_size; i++) {
        bias_host_[i] = bias_blob.data(i);
      }
      if (!bias_.defined())
        bias_ = ImageParam(bias_buf.type(), 4);
      bias_.set(bias_buf);
    }

    LOG(INFO) << "Completed initializing " << this->name() << endl;

    initialized = true;
    return initialized;
  }
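This init() belongs to a Halide-backed Caffe port: it copies the layer's weight and bias protos into freshly allocated host arrays and binds them to Halide ImageParams. A minimal sketch of a call site, assuming the project parses a NetParameter first and that ConvolutionLayer is constructible from a LayerParameter (both are assumptions; only init() and name() appear above):

  caffe::NetParameter net_param;
  // ... parse a .caffemodel into net_param ...
  ConvolutionLayer conv(net_param.layer(0));   // hypothetical constructor
  if (!conv.init()) {
    LOG(ERROR) << "failed to initialize " << conv.name();
  }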
Code example #2
File: blob.cpp  Project: gaoyongwyl/VmmrAuxLinux
void Blob<Dtype>::FromProto(const BlobProto& proto, bool reshape) {
  if (reshape) {
    vector<int> shape;
    if (proto.has_num() || proto.has_channels() ||
        proto.has_height() || proto.has_width()) {
      // Using deprecated 4D Blob dimensions --
      // shape is (num, channels, height, width).
      shape.resize(4);
      shape[0] = proto.num();
      shape[1] = proto.channels();
      shape[2] = proto.height();
      shape[3] = proto.width();
    } else {
      shape.resize(proto.shape().dim_size());
      for (int i = 0; i < proto.shape().dim_size(); ++i) {
        shape[i] = proto.shape().dim(i);
      }
    }
    Reshape(shape);
  } else {
    CHECK(ShapeEquals(proto)) << "shape mismatch (reshape not set)";
  }
  // copy data
  Dtype* data_vec = mutable_cpu_data();
  for (int i = 0; i < count_; ++i) {
    data_vec[i] = proto.data(i);
  }
  if (proto.diff_size() > 0) {
    Dtype* diff_vec = mutable_cpu_diff();
    for (int i = 0; i < count_; ++i) {
      diff_vec[i] = proto.diff(i);
    }
  }
}
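Example #2 is the stock Caffe implementation: legacy 4-D protos take the (num, channels, height, width) path, newer ones use the shape message. A minimal sketch of the usual call site, assuming Caffe's ReadProtoFromBinaryFile helper from caffe/util/io.hpp:

  caffe::BlobProto proto;
  CHECK(caffe::ReadProtoFromBinaryFile("weights.binaryproto", &proto));
  caffe::Blob<float> blob;
  blob.FromProto(proto, /*reshape=*/true);  // size the blob from the proto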
Code example #3
File: blob.cpp  Project: ZhitingHu/NN
void Blob<Dtype>::FromProto(const BlobProto& proto, const bool init_ps_table) {
  Reshape(proto.num(), proto.channels(), proto.height(), proto.width());

  if (blob_mode_ == BlobProto_BlobMode_GLOBAL) {
    if (init_ps_table) { // initialize ps table
      // update values in ps table
      Dtype* data_vec = ReadPSTable(0);
      for (int i = 0; i < count_; ++i) {
        data_vec[i] = data_vec[i] - proto.data(i);
      }
      diff_->set_cpu_data(data_vec);
      UpdatePSTable();
      //TODO: 2016-6-16
      //// fetch the newest values
      //data_vec = ReadPSTable(0);
      //data_->set_cpu_ps_data(data_vec);
    }
  } else {
    //copy data
    Dtype* data_vec = mutable_cpu_data();
    for (int i = 0; i < count_; ++i) {
      data_vec[i] = proto.data(i);
    }
  }

  if (proto.diff_size() > 0) {
    Dtype* diff_vec = mutable_cpu_diff();
    for (int i = 0; i < count_; ++i) {
      diff_vec[i] = proto.diff(i);
    }
  }
}
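Note on the GLOBAL branch above: rather than writing the proto values into local memory, it reads the current parameter-server table, stores (current - proto) as the diff, and pushes that through UpdatePSTable(). Assuming the table update subtracts the diff from the stored parameters, the shared table ends up holding the proto's values, so every worker attached to the table sees the same initialization.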
Code example #4
File: blob.cpp  Project: Xiaomi2008/Caffe_3D_FF
void Blob<Dtype>::FromProto(const BlobProto& proto) {
  LOG(INFO) << "FromProto size = " << proto.num() << " " << proto.channels()
            << " " << proto.height() << " " << proto.width() << " "
            << proto.depth();
  if (proto.depth() == 0) {
    LOG(INFO) << "proto depth is 0, converting to 1 for 2D models to 3D models...";
    Reshape(proto.num(), proto.channels(), proto.height(), proto.width(), 1);
  } else {
    Reshape(proto.num(), proto.channels(), proto.height(), proto.width(),
            proto.depth());
  }
  // copy data
  Dtype* data_vec = mutable_cpu_data();
  /* for testing only
  Dtype data_vec_sum = 0;
  for (int i = 0; i < count_; ++i) {
    data_vec_sum = data_vec_sum + data_vec[i];
  }
  LOG(INFO) << "blob sum value = " << data_vec_sum;
  */
  for (size_t i = 0; i < count_; ++i) {
    data_vec[i] = proto.data(i);
  }
  if (proto.diff_size() > 0) {
    Dtype* diff_vec = mutable_cpu_diff();
    for (size_t i = 0; i < count_; ++i) {
      diff_vec[i] = proto.diff(i);
    }
  }
}
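The depth() check above is what lets weights saved by 2-D models load into this 3-D fork: a proto written without a depth field reads back as 0 and is promoted to a singleton depth axis. An illustrative shape (values hypothetical):

  // 2-D proto:  num=64, channels=3, height=3, width=3, depth absent (0)
  // 3-D blob:   Reshape(64, 3, 3, 3, 1)  -- same count_, one depth slice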
Code example #5
File: blob.cpp  Project: MaoXu/jade
void Blob<Dtype>::FromProto(const BlobProto& proto) {
  Reshape(proto.num(), proto.channels(), proto.height(), proto.width());
  // copy data
  Dtype* data_vec = mutable_cpu_data();
  for (int i = 0; i < count_; ++i) {
    data_vec[i] = proto.data(i);
  }
  if (proto.diff_size() > 0) {
    Dtype* diff_vec = mutable_cpu_diff();
    for (int i = 0; i < count_; ++i) {
      diff_vec[i] = proto.diff(i);
    }
  }
}
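Example #5 is the older, pre-shape-vector form of the same loader: it always reshapes to the legacy 4-D dimensions and then copies data and, when present, diff, with none of the compatibility checks of example #2.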
Code example #6
File: blob.cpp  Project: Xiaomi2008/Caffe_3D_FF
void Blob<Dtype>::FromProtoFC2Conv(const BlobProto& proto) {
  // Note that we do not change the shape of the target layer blob
  // (convolution); the commented-out Reshape would resize the other
  // dimensions while keeping the blob's own number of channels.
  // Reshape(proto.num(), this->channels(), proto.height(), proto.width(), proto.depth());
  // copy data
  Dtype* data_vec = mutable_cpu_data();
  size_t proto_count = proto.num() * proto.channels() * proto.height() *
                       proto.width() * proto.depth();
  CHECK_EQ(proto_count, count_);
  for (size_t i = 0; i < count_; ++i) {
    data_vec[i] = proto.data(i);
  }
  if (proto.diff_size() > 0) {
    Dtype* diff_vec = mutable_cpu_diff();
    for (size_t i = 0; i < count_; ++i) {
      diff_vec[i] = proto.diff(i);
    }
  }
}
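FromProtoFC2Conv is a net-surgery helper: it pours fully-connected weights into a convolution blob without touching the target's shape, and the CHECK_EQ states the only real requirement, equal flat element counts. A hypothetical matching pair:

  // FC weight proto:  1 x 1 x 4096 x 9216 (depth 1)  -> count 37748736
  // Conv target blob: 4096 x 256 x 6 x 6 (x 1)       -> count 37748736
  // Only the flat data is copied; the blob keeps its convolutional shape.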
Code example #7
File: blob.cpp  Project: dieface/caffe
bool Blob<Dtype>::ShapeEquals(const BlobProto& other) {
  if (other.has_num() || other.has_channels() || other.has_height()
      || other.has_width()) {
    // Using deprecated 4D Blob dimensions --
    // shape is (num, channels, height, width).
    // Note: we do not use the normal Blob::num(), Blob::channels(), etc.
    // methods as these index from the beginning of the blob shape, where legacy
    // parameter blobs were indexed from the end of the blob shape (e.g., bias
    // Blob shape (1 x 1 x 1 x N), IP layer weight Blob shape (1 x 1 x M x N)).
    return shape_.size() <= 4 && LegacyShape(-4) == other.num()
        && LegacyShape(-3) == other.channels()
        && LegacyShape(-2) == other.height()
        && LegacyShape(-1) == other.width();
  }
  vector<int> other_shape(other.shape().dim_size());
  for (int i = 0; i < other.shape().dim_size(); ++i) {
    other_shape[i] = other.shape().dim(i);
  }
  return shape_ == other_shape;
}
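A short sketch of the legacy indexing described in the comment above, assuming Caffe's shape-vector Blob constructor: a modern 1-D bias blob must match a legacy proto padded with leading 1s.

  caffe::Blob<float> bias(std::vector<int>{64});  // modern shape {64}
  caffe::BlobProto legacy;
  legacy.set_num(1);
  legacy.set_channels(1);
  legacy.set_height(1);
  legacy.set_width(64);                           // legacy 1 x 1 x 1 x 64
  CHECK(bias.ShapeEquals(legacy));                // matches via LegacyShape(-1)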