예제 #1
0
void Net::InitWeights(const mxArray *mx_weights_in) { // testing
  size_t num_weights = NumWeights();
  mexAssert(num_weights == mexGetNumel(mx_weights_in), 
    "In InitWeights the vector of weights has the wrong length!");
  weights_.Init(mexGetPointer(mx_weights_in), num_weights);
  size_t offset = 0;
  for (size_t i = 0; i < layers_.size(); ++i) {
    layers_[i]->InitWeights(weights_, offset, false);
  }
}
예제 #2
0
void Net::InitWeights(const mxArray *mx_weights_in, mxArray *&mx_weights) {
  size_t num_weights = NumWeights();
  bool isgen = false;
  if (mx_weights_in != NULL) { // training
    mexAssert(num_weights == mexGetNumel(mx_weights_in), 
      "In InitWeights the vector of weights has the wrong length!");
    mx_weights = mexDuplicateArray(mx_weights_in);    
  } else { // genweights
    mx_weights = mexNewMatrix(1, num_weights);    
    isgen = true;
  }
  weights_.Init(mexGetPointer(mx_weights), num_weights);
  size_t offset = 0;
  for (size_t i = 0; i < layers_.size(); ++i) {
    layers_[i]->InitWeights(weights_, offset, isgen);
  }  
}
예제 #3
0
파일: net.cpp 프로젝트: Geekrick88/ConvNet
void Net::GetWeights(mxArray *&mx_weights) const {
  // Allocate the output MATLAB column vector and view it as a host matrix.
  const size_t total = NumWeights();
  mx_weights = mexNewMatrix(total, 1);
  Mat host_view;
  host_view.attach(mexGetPointer(mx_weights), total, 1);
  // Staging matrix the layers write into. In the CPU build it aliases the
  // output buffer directly; otherwise it stays separate and is copied back.
  Mat collected(total, 1);
  #if COMP_REGIME != 2 // CPU
    collected.attach(host_view);
  #endif
  size_t pos = 0;
  for (size_t layer = 0; layer < layers_.size(); ++layer) {
    layers_[layer]->GetWeights(collected, pos);
  }
  #if COMP_REGIME == 2 // GPU
    // Device-resident results must be transferred to the host output buffer.
    DeviceToHost(collected, host_view);
  #endif
}
예제 #4
0
파일: net.cpp 프로젝트: Geekrick88/ConvNet
void Net::InitWeights(const mxArray *mx_weights_in) {
  bool isgen = false;
	size_t num_weights = NumWeights();  
  MatCPU weights_cpu;
  if (mx_weights_in != NULL) { // training, testing
    mexAssert(num_weights == mexGetNumel(mx_weights_in), 
      "In InitWeights the vector of weights has the wrong length!");        
    mexGetMatrix(mx_weights_in, weights_cpu);
  } else { // genweights        
    isgen = true;
    weights_cpu.resize(num_weights, 1);
  }  
  weights_mat_.resize(num_weights, 1);
  // we can attach (in CPU version), 
  // but don't want to change the initial weights
  weights_mat_ = weights_cpu;    
  weights_.Init(weights_mat_);
  size_t offset = 0;
  for (size_t i = 0; i < layers_.size(); ++i) {
    layers_[i]->InitWeights(weights_, offset, isgen);
  }  
}