Example #1
 virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
     const vector<Blob<Dtype>*>& top) {
   // Disallow PythonLayer in MultiGPU training stage, due to GIL issues
   // Details: https://github.com/BVLC/caffe/issues/2936
   if (this->phase_ == TRAIN && Caffe::solver_count() > 1
       && !ShareInParallel()) {
     LOG(FATAL) << "PythonLayer is not implemented in Multi-GPU training";
   }
   // Expose the user-supplied param_str to the Python layer object.
   self_.attr("param_str") = bp::str(
       this->layer_param_.python_param().param_str());
   //self_.attr("phase") = static_cast<int>(this->phase_);
   // Delegate the rest of the setup to the Python object's setup() method.
   self_.attr("setup")(bottom, top);
 }
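
The LOG(FATAL) branch fires only while ShareInParallel() returns false. A minimal sketch of how a layer can satisfy the guard instead, assuming the share_in_parallel field that BVLC/caffe's PythonParameter exposes (an assumption about the proto definition; any override returning true has the same effect):

 // Sketch: opting in to sharing, so LayerSetUp above skips the FATAL
 // branch and one Python object is shared across the parallel solvers'
 // nets. Calls into it are still serialized by the Python GIL.
 virtual inline bool ShareInParallel() const {
   return this->layer_param_.python_param().share_in_parallel();
 }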
Example #2
 /** @brief Set whether this layer is actually shared by other nets.
  *         If ShareInParallel() is true, more than one GPU is in use, and
  *         the net is in the TRAIN phase, then is_shared should be set true.
  */
 inline void SetShared(bool is_shared) {
   CHECK(ShareInParallel() || !is_shared)
       << type() << "Layer does not support sharing.";
   is_shared_ = is_shared;
 }
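
A hedged caller-side sketch of the contract the CHECK encodes (MarkShared is a hypothetical helper, not Caffe's actual net-setup code): is_shared may be true only for layers that opt in via ShareInParallel(), and per the comment above only in multi-GPU TRAIN runs.

 #include "caffe/layer.hpp"

 // Hypothetical helper: decide whether a layer instance may be reused
 // (shared) across the nets of parallel solvers.
 template <typename Dtype>
 void MarkShared(caffe::Layer<Dtype>* layer, caffe::Phase phase) {
   const bool share = layer->ShareInParallel()   // layer opts in
       && caffe::Caffe::solver_count() > 1       // more than one solver/GPU
       && phase == caffe::TRAIN;                 // training nets only
   // Passing false is always legal; passing true requires ShareInParallel(),
   // otherwise SetShared's CHECK aborts with "...does not support sharing."
   layer->SetShared(share);
 }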