Example #1
template <typename Dtype>
void Blob<Dtype>::FromProto(const BlobProto& proto, bool reshape) {
  if (reshape) {
    vector<int> shape;
    if (proto.has_num() || proto.has_channels() ||
        proto.has_height() || proto.has_width()) {
      // Using deprecated 4D Blob dimensions --
      // shape is (num, channels, height, width).
      shape.resize(4);
      shape[0] = proto.num();
      shape[1] = proto.channels();
      shape[2] = proto.height();
      shape[3] = proto.width();
    } else {
      shape.resize(proto.shape().dim_size());
      for (int i = 0; i < proto.shape().dim_size(); ++i) {
        shape[i] = proto.shape().dim(i);
      }
    }
    Reshape(shape);
  } else {
    CHECK(ShapeEquals(proto)) << "shape mismatch (reshape not set)";
  }
  // copy data
  Dtype* data_vec = mutable_cpu_data();
  if (proto.double_data_size() > 0) {
    CHECK_EQ(count_, proto.double_data_size());
    for (int i = 0; i < count_; ++i) {
      data_vec[i] = proto.double_data(i);
    }
  } else {
    CHECK_EQ(count_, proto.data_size());
    for (int i = 0; i < count_; ++i) {
      data_vec[i] = proto.data(i);
    }
  }
  if (proto.double_diff_size() > 0) {
    CHECK_EQ(count_, proto.double_diff_size());
    Dtype* diff_vec = mutable_cpu_diff();
    for (int i = 0; i < count_; ++i) {
      diff_vec[i] = proto.double_diff(i);
    }
  } else if (proto.diff_size() > 0) {
    CHECK_EQ(count_, proto.diff_size());
    Dtype* diff_vec = mutable_cpu_diff();
    for (int i = 0; i < count_; ++i) {
      diff_vec[i] = proto.diff(i);
    }
  }
}
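For reference, here is a minimal round trip against the standard BVLC Caffe API (ToProto is the serializing counterpart of FromProto; the shape and the helper name RoundTripExample are illustrative):

#include "caffe/blob.hpp"
#include "caffe/proto/caffe.pb.h"

void RoundTripExample() {
  caffe::Blob<float> src(1, 2, 3, 4);        // legacy 4D shape, 24 elements
  caffe::BlobProto proto;
  src.ToProto(&proto, /*write_diff=*/true);  // fills shape, data, and diff

  caffe::Blob<float> dst;                    // shape not yet set
  dst.FromProto(proto, /*reshape=*/true);    // adopts src's shape and values
  CHECK(dst.ShapeEquals(proto)) << "round trip changed the shape";
}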
Example #2
template <typename Dtype>
void Blob<Dtype>::FromProto(const BlobProto& proto, const bool init_ps_table) {
  Reshape(proto.num(), proto.channels(), proto.height(), proto.width());

  if (blob_mode_ == BlobProto_BlobMode_GLOBAL) {
    if (init_ps_table) { // initialize ps table
      // update values in ps table
      Dtype* data_vec = ReadPSTable(0);
      for (int i = 0; i < count_; ++i) {
        data_vec[i] = data_vec[i] - proto.data(i);
      }
      diff_->set_cpu_data(data_vec);
      UpdatePSTable();
      //TODO: 2016-6-16
      //// fetch the newest values
      //data_vec = ReadPSTable(0);
      //data_->set_cpu_ps_data(data_vec);
    }
  } else {
    //copy data
    Dtype* data_vec = mutable_cpu_data();
    for (int i = 0; i < count_; ++i) {
      data_vec[i] = proto.data(i);
    }
  }

  if (proto.diff_size() > 0) {
    Dtype* diff_vec = mutable_cpu_diff();
    for (int i = 0; i < count_; ++i) {
      diff_vec[i] = proto.diff(i);
    }
  }
}
Example #3
template <typename Dtype>
void Blob<Dtype>::scale_diff(Dtype scale_factor) {
  Dtype* diff;
  if (!diff_) { return; }
  switch (diff_->head()) {
  case SyncedMemory::SYNCED_PRV:
  case SyncedMemory::HEAD_AT_PRV:
    diff = mutable_prv_diff();
    caffe_scal(prv_diff_count(), scale_factor, diff);
    break;
  case SyncedMemory::HEAD_AT_CPU:
    diff = mutable_cpu_diff();
    caffe_scal(count_, scale_factor, diff);
    return;
  case SyncedMemory::HEAD_AT_GPU:
  case SyncedMemory::SYNCED:
#ifndef CPU_ONLY
    diff = mutable_gpu_diff();
    caffe_gpu_scal(count_, scale_factor, diff);
    return;
#else
    NO_GPU;
#endif
  case SyncedMemory::UNINITIALIZED:
    return;
  default:
    LOG(FATAL) << "Unknown SyncedMemory head state: " << diff_->head();
  }
}
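The head() switch exists so the gradient is scaled wherever its freshest copy already lives (CPU, GPU, or the Intel fork's prv buffer), avoiding a host/device transfer. A small usage sketch; the helper name and the iter_size averaging are illustrative, not part of the snippet:

#include "caffe/blob.hpp"

// Hypothetical helper: rescale an accumulated gradient in place.
void AverageAccumulatedDiff(caffe::Blob<float>* param, int iter_size) {
  // scale_diff dispatches on SyncedMemory::head(), so the multiply runs on
  // whichever side last wrote the diff, with no extra copy.
  param->scale_diff(1.0f / iter_size);
}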
Example #4
template <typename Dtype>
void Blob<Dtype>::FromProto(const BlobProto& proto) {
  Reshape(proto.num(), proto.channels(), proto.height(), proto.width());
  // copy data
  Dtype* data_vec = mutable_cpu_data();
  for (int i = 0; i < count_; ++i) {
    data_vec[i] = proto.data(i);
  }
  if (proto.diff_size() > 0) {
    Dtype* diff_vec = mutable_cpu_diff();
    for (int i = 0; i < count_; ++i) {
      diff_vec[i] = proto.diff(i);
    }
  }
}
Example #5
template <typename Dtype>
void Blob<Dtype>::scale_diff(Dtype scale_factor) {
  Dtype* diff;
  if (!diff_) { return; }
  switch (diff_->head()) {
  case SyncedMemory::HEAD_AT_CPU:
    diff = mutable_cpu_diff();
    caffe_scal(count_, scale_factor, diff);
    return;
  case SyncedMemory::UNINITIALIZED:
    return;
  default:
    LOG(FATAL) << "Unknown SyncedMemory head state: " << diff_->head();
  }
}
Example #6
template <typename Dtype>
void Blob<Dtype>::FromProtoWithExtraCopy(const BlobProto& proto) {
  // Reshape the other dimensions but keep the same number of channels.
  Reshape(proto.num(), this->channels(), proto.height(), proto.width(), proto.depth());
  // copy data
  Dtype* data_vec = mutable_cpu_data();
  int sourceDataSize = proto.data_size();
  for (int i = 0; i < count_; ++i) {
    data_vec[i] = proto.data(i%sourceDataSize);
  }
  if (proto.diff_size() > 0) {
    Dtype* diff_vec = mutable_cpu_diff();
    for (int i = 0; i < count_; ++i) {
      diff_vec[i] = proto.diff(i%sourceDataSize);
    }
  }
}
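The i % sourceDataSize indexing tiles the source values over a larger destination, so when count_ exceeds proto.data_size() the copy wraps around instead of reading past the end. A self-contained sketch of just that arithmetic, with toy values rather than the Caffe API:

#include <cstdio>

int main() {
  const float source[3] = {10.f, 20.f, 30.f};  // stands in for proto.data()
  const int sourceDataSize = 3;
  float dest[7];                               // stands in for data_vec, count_ == 7
  for (int i = 0; i < 7; ++i) {
    dest[i] = source[i % sourceDataSize];      // yields 10 20 30 10 20 30 10
    printf("%g ", dest[i]);
  }
  printf("\n");
  return 0;
}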
Example #7
template <typename Dtype>
void Blob<Dtype>::FromProtoFC2Conv(const BlobProto& proto) {
  // Note that we do not change the shape of the target layer blob (convolution).
  // Reshape the other dimensions but keep the same number of channels:
  // Reshape(proto.num(), this->channels(), proto.height(), proto.width(), proto.depth());
  // copy data
  Dtype* data_vec = mutable_cpu_data();
  size_t proto_count = proto.num() * proto.channels() * proto.height()
      * proto.width() * proto.depth();
  CHECK_EQ(proto_count, count_);
  for (size_t i = 0; i < count_; ++i) {
    data_vec[i] = proto.data(i);
  }
  //  LOG(INFO)<<"proto.diff_size= "<<proto.diff_size();
  if (proto.diff_size() > 0) {
    Dtype* diff_vec = mutable_cpu_diff();
    for (size_t i = 0; i < count_; ++i) {
      diff_vec[i] = proto.diff(i);
    }
  }
}
Example #8
template <typename Dtype>
void Blob<Dtype>::FromProto(const BlobProto& proto, bool need_reshape) {
	// copy shape (the need_reshape default of true belongs in the declaration)
	if (need_reshape) {
		vector<int> shape;
		shape.resize(proto.shape().dim_size());
		for (int i = 0; i < shape.size(); i++)
			shape[i] = proto.shape().dim(i);
		reshape(shape);
	}
	if (proto.data_size() > 0) {
		CHECK_EQ(proto.data_size(), count());
		Dtype* data = mutable_cpu_data();
		for (int i = 0; i < count_; i++) data[i] = proto.data(i);
	}
	if (proto.diff_size() > 0) {
		CHECK_EQ(proto.diff_size(), count());
		Dtype* diff = mutable_cpu_diff();
		for (int i = 0; i < count_; i++) diff[i] = proto.diff(i);
	}
}
Example #9
template <typename Dtype>
void Blob<Dtype>::FromProto(const BlobProto& proto) {
  LOG(INFO) << "FromProto size = " << proto.num() << " " << proto.channels()
      << " " << proto.height() << " " << proto.width() << " " << proto.depth();
  if (proto.depth() == 0) {
    LOG(INFO) << "proto depth is 0; assuming depth 1 so 2D models load into 3D blobs...";
    Reshape(proto.num(), proto.channels(), proto.height(), proto.width(), 1);
  } else {
    Reshape(proto.num(), proto.channels(), proto.height(), proto.width(), proto.depth());
  }
  // copy data
  Dtype* data_vec = mutable_cpu_data();
  /* for testing only
  Dtype data_vec_sum=0;
  for (int i = 0; i < count_; ++i) {
    data_vec_sum=data_vec_sum+data_vec[i]; 
  }
  LOG(INFO) << "blob sum value = " << data_vec_sum;
  */
  for (size_t i = 0; i < count_; ++i) {
    data_vec[i] = proto.data(i);
    // LOG(INFO) << "proto.data(i) = " << proto.data(i);
  }
  //LOG(INFO)<<"proto.data[11870779] ="<<proto.data(11870779);
  //sleep(20);
  //  LOG(INFO)<<"proto.diff_size= "<<proto.diff_size();
  if (proto.diff_size() > 0) {
    Dtype* diff_vec = mutable_cpu_diff();
    for (size_t i = 0; i < count_; ++i) {
      diff_vec[i] = proto.diff(i);
    }
  }
  
  // if (Caffe::mode() == Caffe::GPU) {
  //   gpu_data();
  // }
}
Example #10
template <typename Dtype>
void Blob<Dtype>::scale_diff(Dtype scale_factor) {
  Dtype* diff;
  if (!diff_) {
    return;
  }
  switch (diff_->head()) {
    case SyncedMemory::HEAD_AT_CPU: {
      diff = mutable_cpu_diff();
      caffe_scal(count_, scale_factor, diff);
      return;
    }
    case SyncedMemory::HEAD_AT_GPU:
    case SyncedMemory::SYNCED: {
#ifndef CPU_ONLY
      diff = mutable_gpu_diff();
      if (device_->backend() == Backend::BACKEND_CUDA) {
#ifdef USE_CUDA
        caffe_gpu_scal(count_, scale_factor, diff);
#endif
      } else {
#ifdef USE_GREENTEA
        greentea_gpu_scal(device_->id(), count_, scale_factor,
                          (cl_mem) diff, 0);
#endif
      }
      return;
#else
      NO_GPU;
#endif
    }
    case SyncedMemory::UNINITIALIZED:
      return;
    default:
      LOG(FATAL) << "Unknown SyncedMemory head state: " << diff_->head();
  }
}
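This variant is from the OpenCL (greentea) branch of Caffe, which picks a backend at runtime via device_->backend() and compiles each path behind USE_CUDA / USE_GREENTEA guards. A condensed sketch of that pattern, reusing the fork's symbols (device, Backend, greentea_gpu_scal) exactly as they appear in the snippet above:

#ifndef CPU_ONLY
template <typename Dtype>
void ScaleOnDevice(device* dev, int n, Dtype alpha, Dtype* gpu_ptr) {
  if (dev->backend() == Backend::BACKEND_CUDA) {
#ifdef USE_CUDA
    caffe_gpu_scal(n, alpha, gpu_ptr);  // cuBLAS path
#endif
  } else {
#ifdef USE_GREENTEA
    // OpenCL path: buffers are cl_mem handles plus an explicit offset.
    greentea_gpu_scal(dev->id(), n, alpha, (cl_mem) gpu_ptr, 0);
#endif
  }
}
#endif  // CPU_ONLY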