Example #1
#include <cstdlib>           // atoi
#include <cuda_runtime.h>    // cudaSetDevice

#include "caffe/caffe.hpp"
#include "caffe/util/io.hpp"

using namespace caffe;

int main(int argc, char** argv) {
  // Run on GPU 1, in GPU mode, with the network in test phase.
  cudaSetDevice(1);
  Caffe::set_mode(Caffe::GPU);
  Caffe::set_phase(Caffe::TEST);

  // Expects three arguments: the trained net proto, the output path for the
  // quantized proto, and the quantization granularity.
  CHECK_EQ(argc, 4);

  LOG(INFO) << "Reading trained parameters";
  NetParameter trained_net_param;
  ReadProtoFromBinaryFile(argv[1], &trained_net_param);

  // Quantization granularity taken from the command line.
  int quant_size = atoi(argv[3]);
  LOG(INFO) << "Quantizing trained parameters";
  NetParameter quantized_net_param = trained_net_param;
  // Quantize every parameter blob of every layer in place.
  // quantizeBlob() is defined elsewhere; see the sketch after main().
  for (int i = 0; i < quantized_net_param.layers_size(); ++i) {
    LayerParameter* layer =
        quantized_net_param.mutable_layers(i)->mutable_layer();
    for (int blob_id = 0; blob_id < layer->blobs_size(); ++blob_id) {
      quantizeBlob(layer->mutable_blobs(blob_id), quant_size);
    }
  }

  LOG(INFO) << "Outputting quantized parameters";
  WriteProtoToBinaryFile(quantized_net_param, argv[2]);
  LOG(INFO) << "Finish writing quantized parameters";

  return 0;
}
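
The helper quantizeBlob() called in the loop above is not part of this listing. The sketch below is a hypothetical implementation, not the one used here: it assumes quant_size is the number of evenly spaced quantization levels, that each BlobProto stores its weights in the repeated float field data, and that <algorithm> is included. It snaps every weight to the nearest level between the blob's minimum and maximum value; it would sit above main() (or be declared there) in the same file.

// Hypothetical sketch of quantizeBlob(): uniform quantization of a blob's
// weights to quant_size evenly spaced levels between its min and max.
void quantizeBlob(BlobProto* blob, int quant_size) {
  if (blob->data_size() == 0 || quant_size < 2) return;
  float min_val = blob->data(0);
  float max_val = blob->data(0);
  for (int i = 1; i < blob->data_size(); ++i) {
    min_val = std::min(min_val, blob->data(i));
    max_val = std::max(max_val, blob->data(i));
  }
  if (max_val == min_val) return;  // all values identical; nothing to quantize
  const float step = (max_val - min_val) / (quant_size - 1);
  for (int i = 0; i < blob->data_size(); ++i) {
    // Snap each value to the nearest quantization level.
    const int level =
        static_cast<int>((blob->data(i) - min_val) / step + 0.5f);
    blob->set_data(i, min_val + level * step);
  }
}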
  // Copies trained parameters for this convolution layer from a source
  // LayerParameter (e.g. one read from a quantized snapshot), then
  // re-initializes the layer so it uses the copied blobs.
  bool ConvolutionLayer::copy_trained_layer(const LayerParameter& param) {
    // Nothing to copy if the source layer carries no blobs.
    if (param.blobs_size() < 1) return true;
    // Blob 0: the filter weights.
    if (layer_param_.blobs_size() < 1)
      layer_param_.add_blobs()->CopyFrom(param.blobs(0));
    else
      layer_param_.mutable_blobs(0)->CopyFrom(param.blobs(0));
    // Blob 1: the bias, copied only when both this layer and the source
    // layer actually use a bias term.
    if (layer_param_.convolution_param().bias_term() &&
        param.convolution_param().bias_term()) {
      if (layer_param_.blobs_size() < 2)
        layer_param_.add_blobs()->CopyFrom(param.blobs(1));
      else
        layer_param_.mutable_blobs(1)->CopyFrom(param.blobs(1));
    }
    // Rebuild the layer's internal state from the newly copied parameters.
    initialized = false;
    init();
    return true;
  }
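
For context, copy_trained_layer() would presumably be called once per layer when the network loads the quantized NetParameter, in the spirit of Caffe's Net::CopyTrainedLayersFrom. The loop below is a hypothetical sketch only: the members layers_ and layer_names_ and the method name copy_trained_layers_from are assumptions, not part of the listing.

  // Hypothetical sketch: dispatch each source layer's blobs to the network
  // layer with the same name. layers_, layer_names_ and this method name are
  // assumptions modeled on Caffe's Net::CopyTrainedLayersFrom.
  void Net::copy_trained_layers_from(const NetParameter& param) {
    for (int i = 0; i < param.layers_size(); ++i) {
      const LayerParameter& source_layer = param.layers(i).layer();
      for (size_t j = 0; j < layers_.size(); ++j) {
        if (layer_names_[j] == source_layer.name()) {
          // Each layer copies the (quantized) blobs and re-initializes itself.
          layers_[j]->copy_trained_layer(source_layer);
        }
      }
    }
  }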