Example #1
template <typename Dtype>
shared_ptr<Layer<Dtype> > GetDeconvolutionLayer(const LayerParameter& param) {
  ConvolutionParameter conv_param = param.convolution_param();
  ConvolutionParameter_Engine engine = conv_param.engine();
#ifdef USE_CUDNN
  bool use_dilation = false;
  for (int i = 0; i < conv_param.dilation_size(); ++i) {
    if (conv_param.dilation(i) > 1) {
      use_dilation = true;
    }
  }
#endif
  if (engine == ConvolutionParameter_Engine_DEFAULT) {
    engine = ConvolutionParameter_Engine_CAFFE;
#ifdef USE_CUDNN
    if (!use_dilation) {
      engine = ConvolutionParameter_Engine_CUDNN;
    }
#endif
  }
  if (engine == ConvolutionParameter_Engine_CAFFE) {
    return shared_ptr<Layer<Dtype> >(new DeconvolutionLayer<Dtype>(param));
#ifdef USE_CUDNN
  } else if (engine == ConvolutionParameter_Engine_CUDNN) {
    if (use_dilation) {
      LOG(FATAL) << "CuDNN doesn't support the dilated deconvolution at Layer "
                 << param.name();
    }
    return shared_ptr<Layer<Dtype> >(new CuDNNDeconvolutionLayer<Dtype>(param));
#endif
  } else {
    LOG(FATAL) << "Layer " << param.name() << " has unknown engine.";
    throw;  // Avoids missing return warning
  }
}
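In upstream BVLC Caffe, factory functions like this are hooked into the layer registry right after their definition; a minimal sketch of that registration, assuming the standard REGISTER_LAYER_CREATOR macro from caffe/layer_factory.hpp, would be:

// Registers GetDeconvolutionLayer as the creator callback for "Deconvolution"
// layers, so the layer registry can instantiate them by type name.
REGISTER_LAYER_CREATOR(Deconvolution, GetDeconvolutionLayer);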
Example #2
template <typename Dtype>
shared_ptr<Layer<Dtype> > GetConvolutionLayer(const LayerParameter& param) {
  ConvolutionParameter_Engine engine = param.convolution_param().engine();
  if (engine == ConvolutionParameter_Engine_DEFAULT) {
    engine = ConvolutionParameter_Engine_CAFFE;
#ifdef USE_CUDNN
    engine = ConvolutionParameter_Engine_CUDNN;
#endif
  }
  if (engine == ConvolutionParameter_Engine_CAFFE
      || Caffe::GetDevice(param.device(), true)->backend() == BACKEND_OpenCL
      || checkConvolutionDilated(param.convolution_param())) {
    return shared_ptr<Layer<Dtype> >(new ConvolutionLayer<Dtype>(param));
#ifdef USE_CUDNN
  } else if (engine == ConvolutionParameter_Engine_CUDNN) {
    if (checkConvolutionDilated(param.convolution_param())) {
      LOG(FATAL) << "CuDNN doesn't support the dilated convolution at Layer "
                 << param.name();
    }
    return shared_ptr<Layer<Dtype> >(new CuDNNConvolutionLayer<Dtype>(param));
#endif
  } else {
    LOG(FATAL) << "Layer " << param.name() << " has unknown engine.";
    throw;  // Avoids missing return warning
  }
}
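The helper checkConvolutionDilated used above is not shown in this example. Judging from the inline dilation loops in Examples #1, #3, and #7, a minimal sketch of what it presumably does could look like this (the exact signature is an assumption):

// Hypothetical sketch: true if any dilation factor is greater than 1,
// mirroring the inline loops in the other examples.
bool checkConvolutionDilated(const ConvolutionParameter& conv_param) {
  for (int i = 0; i < conv_param.dilation_size(); ++i) {
    if (conv_param.dilation(i) > 1) {
      return true;
    }
  }
  return false;
}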
Example #3
template <typename Dtype>
shared_ptr<Layer<Dtype> > GetDeconvolutionLayer(
    const LayerParameter& param) {
  ConvolutionParameter conv_param = param.convolution_param();
  ConvolutionParameter_Engine engine = conv_param.engine();

#if defined(MKL2017_SUPPORTED)
  bool use_dilation = false;
  for (int i = 0; i < conv_param.dilation_size(); ++i) {
    if (conv_param.dilation(i) > 1) {
      use_dilation = true;
    }
  }
#endif

  // New, more flexible way of providing engine
  if (engine == ConvolutionParameter_Engine_DEFAULT && param.engine() != "") {
    EngineParser ep(param.engine());

    if (ep.isEngine("CAFFE")) {
      engine = ConvolutionParameter_Engine_CAFFE;
    }
#ifdef MKL2017_SUPPORTED
    else if (!use_dilation && ep.isEngine("MKL2017")) {
      engine = ConvolutionParameter_Engine_MKL2017;
    }
#endif

  }

  if (engine == ConvolutionParameter_Engine_DEFAULT) {
    engine = ConvolutionParameter_Engine_CAFFE;
  }
  if (engine == ConvolutionParameter_Engine_CAFFE) {
    return shared_ptr<Layer<Dtype> >(new DeconvolutionLayer<Dtype>(param));
#ifdef MKL2017_SUPPORTED
  } else if (engine == ConvolutionParameter_Engine_MKL2017) {
    if (use_dilation) {
      LOG(FATAL) << "MKL2017 doesn't support the dilated convolution at Layer "
                 << param.name();
    }
    return shared_ptr<Layer<Dtype> >(new MKLDeconvolutionLayer<Dtype>(param));
#endif
  } else {
    LOG(FATAL) << "Layer " << param.name() << " has unknown engine.";
  }
  return shared_ptr<Layer<Dtype> >();
}
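The string-valued engine field checked at the top of this example comes from Intel Caffe's LayerParameter; a hedged usage sketch, assuming EngineParser accepts a plain engine name such as "MKL2017", might look like:

// Hypothetical caller: request the MKL2017 engine by name rather than via
// the ConvolutionParameter engine enum.
LayerParameter param;
param.set_name("deconv1");
param.set_engine("MKL2017");  // parsed by EngineParser in the factory above
shared_ptr<Layer<float> > layer = GetDeconvolutionLayer<float>(param);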
Example #4
template <typename Dtype>
bool ConvolutionLayer<Dtype>::copy_trained_layer(const LayerParameter& param) {
  if (param.blobs_size() < 1) return true;
  // Copy the weight blob, creating it if this layer has none yet.
  if (layer_param_.blobs_size() < 1)
    layer_param_.add_blobs()->CopyFrom(param.blobs(0));
  else
    layer_param_.mutable_blobs(0)->CopyFrom(param.blobs(0));
  // Copy the bias blob only if both layers use a bias term.
  if (layer_param_.convolution_param().bias_term() &&
      param.convolution_param().bias_term()) {
    if (layer_param_.blobs_size() < 2)
      layer_param_.add_blobs()->CopyFrom(param.blobs(1));
    else
      layer_param_.mutable_blobs(1)->CopyFrom(param.blobs(1));
  }
  // Force re-initialization with the newly copied parameters.
  initialized = false;
  init();
  return true;
}
Example #5
template <typename Dtype>
shared_ptr<Layer<Dtype> > GetConvolutionLayer(
    const LayerParameter& param) {
  ConvolutionParameter_Engine engine = param.convolution_param().engine();
  if (engine == ConvolutionParameter_Engine_DEFAULT) {
    engine = ConvolutionParameter_Engine_CAFFE;
#ifdef USE_CUDNN
    engine = ConvolutionParameter_Engine_CUDNN;
#endif
  }
  if (engine == ConvolutionParameter_Engine_CAFFE) {
    return shared_ptr<Layer<Dtype> >(new ConvolutionLayer<Dtype>(param));
#ifdef USE_CUDNN
  } else if (engine == ConvolutionParameter_Engine_CUDNN) {
    return shared_ptr<Layer<Dtype> >(new CuDNNConvolutionLayer<Dtype>(param));
#endif
  } else {
    LOG(FATAL) << "Layer " << param.name() << " has unknown engine.";
    throw;  // Avoids missing return warning
  }
}
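A minimal usage sketch for this factory; the layer name and convolution settings below are illustrative assumptions, not values taken from the examples:

// Hypothetical caller: describe a 3x3, 64-output convolution and let the
// factory pick the CAFFE or CuDNN implementation depending on the build.
LayerParameter param;
param.set_name("conv1");
param.set_type("Convolution");
ConvolutionParameter* conv = param.mutable_convolution_param();
conv->set_num_output(64);
conv->add_kernel_size(3);
shared_ptr<Layer<float> > layer = GetConvolutionLayer<float>(param);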
Example #6
template <typename Dtype>
shared_ptr<Layer<Dtype> > GetConvolutionLayer(const LayerParameter& param) {
  ConvolutionParameter_Engine engine = param.convolution_param().engine();
  if (engine == ConvolutionParameter_Engine_DEFAULT) {
    engine = ConvolutionParameter_Engine_CAFFE;
#ifdef USE_CUDNN
    engine = ConvolutionParameter_Engine_CUDNN;
#endif
  }
  if (engine == ConvolutionParameter_Engine_CAFFE
      || Caffe::GetDefaultDeviceContext()->backend() == BACKEND_OpenCL) {
    return shared_ptr<Layer<Dtype> >(new ConvolutionLayer<Dtype>(param));
#ifdef USE_CUDNN
  } else if (engine == ConvolutionParameter_Engine_CUDNN) {
    return shared_ptr<Layer<Dtype> >(new CuDNNConvolutionLayer<Dtype>(param));
#endif
  } else {
    LOG(FATAL) << "Layer " << param.name() << " has unknown engine.";
    throw;  // Avoids missing return warning
  }
}
Example #7
template <typename Dtype>
shared_ptr<Layer<Dtype> > GetConvolutionLayer(
    const LayerParameter& param) {
  ConvolutionParameter conv_param = param.convolution_param();
  ConvolutionParameter_Engine engine = conv_param.engine();
#if defined(USE_CUDNN) || defined(USE_MKL2017_AS_DEFAULT_ENGINE)
  bool use_dilation = false;
  for (int i = 0; i < conv_param.dilation_size(); ++i) {
    if (conv_param.dilation(i) > 1) {
      use_dilation = true;
    }
  }
#endif
  if (engine == ConvolutionParameter_Engine_DEFAULT) {
    engine = ConvolutionParameter_Engine_CAFFE;
#ifdef USE_CUDNN
    if (!use_dilation) {
      engine = ConvolutionParameter_Engine_CUDNN;
    }
#elif defined(USE_MKL2017_AS_DEFAULT_ENGINE)
    if (!use_dilation) {
      engine = ConvolutionParameter_Engine_MKL2017;
    }
#endif
  }
  if (engine == ConvolutionParameter_Engine_CAFFE) {
    return shared_ptr<Layer<Dtype> >(new ConvolutionLayer<Dtype>(param));
#ifdef USE_CUDNN
  } else if (engine == ConvolutionParameter_Engine_CUDNN) {
    if (use_dilation) {
      LOG(FATAL) << "CuDNN doesn't support the dilated convolution at Layer "
                 << param.name();
    }
    return shared_ptr<Layer<Dtype> >(new CuDNNConvolutionLayer<Dtype>(param));
#endif
#ifdef MKL2017_SUPPORTED
  } else if (engine == ConvolutionParameter_Engine_MKL2017) {
    return shared_ptr<Layer<Dtype> >(new MKLConvolutionLayer<Dtype>(param));
#endif
  } else {
    LOG(FATAL) << "Layer " << param.name() << " has unknown engine.";
    throw;  // Avoids missing return warning
  }
}
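When the engine is set explicitly on the ConvolutionParameter, the DEFAULT-resolution block at the top of these factories is skipped entirely; a hedged sketch of pinning a dilated convolution to the plain CAFFE engine:

// Hypothetical caller: force the CAFFE engine so the factory never reaches
// the CuDNN branch, which rejects dilation in this example.
LayerParameter param;
param.set_name("conv_dilated");
ConvolutionParameter* conv = param.mutable_convolution_param();
conv->set_engine(ConvolutionParameter_Engine_CAFFE);
conv->add_dilation(2);
shared_ptr<Layer<float> > layer = GetConvolutionLayer<float>(param);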