Example #1
 virtual Dtype Backward_gpu(const vector<Blob<Dtype>*>& top,
                            const bool propagate_down,
                            vector<Blob<Dtype>*>* bottom)
 {
   // LOG(WARNING) << "Using CPU code as backup.";
   return Backward_cpu(top, propagate_down, bottom);
 }
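Note on the API: Examples #1 and #2 use the original Caffe interface, in which Backward returned the loss as a Dtype and took a single const bool propagate_down plus a pointer to the bottom vector. Example #6 shows a transitional variant (per-blob vector<bool> flags but still a pointer to bottom). The remaining examples use the current interface, which returns void and takes both propagate_down and bottom by const reference.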
Example #2
template <typename Dtype>
inline Dtype Layer<Dtype>::Backward(const vector<Blob<Dtype>*>& top,
    const bool propagate_down, vector<Blob<Dtype>*>* bottom) {
  switch (Caffe::mode()) {
  case Caffe::CPU:
    return Backward_cpu(top, propagate_down, bottom);
  case Caffe::GPU:
    return Backward_gpu(top, propagate_down, bottom);
  default:
    LOG(FATAL) << "Unknown caffe mode.";
  }
  return Dtype(0);  // unreachable; silences missing-return warnings
}
Example #3
template <typename Dtype>
inline void Layer<Dtype>::Backward(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  switch (Caffe::mode()) {
  case Caffe::CPU:
    Backward_cpu(top, propagate_down, bottom);
    break;
  case Caffe::GPU:
    Backward_gpu(top, propagate_down, bottom);
    break;
  default:
    LOG(FATAL) << "Unknown caffe mode.";
  }
}
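For context, a minimal usage sketch, assuming a Net<Dtype>* net that has already been constructed and initialized: the global mode set through Caffe::set_mode() is exactly what the switch above dispatches on.

Caffe::set_mode(Caffe::GPU);  // Backward() will take the Caffe::GPU branch
net->Forward();               // forward pass fills the top blobs
net->Backward();              // each layer dispatches as in the switch above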
Example #4
template <typename Dtype>
inline void Layer<Dtype>::Backward(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom) {
#ifdef USE_MLSL
  if (Bypass(bottom, top)) return;
#endif
  switch (Caffe::mode()) {
  case Caffe::CPU:
    Backward_cpu(top, propagate_down, bottom);
    break;
  case Caffe::GPU:
    Backward_gpu(top, propagate_down, bottom);
    break;
  default:
    LOG(FATAL) << "Unknown caffe mode.";
  }
}
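This variant appears to come from Intel's Caffe fork: when built with USE_MLSL (Intel's Machine Learning Scaling Library), the Bypass() check lets a layer skip its backward pass entirely in multi-node runs, before the usual mode dispatch.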
Example #5
template <typename Dtype>
void LibProcessLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  vector<void*> data_bottom;
  vector<const void*> data_top;

  if (iface_.backward_gpu) {
    // Hand raw GPU pointers to the external library's backward callback.
    for (size_t i = 0; i < bottom.size(); ++i)
      data_bottom.push_back(bottom[i]->mutable_gpu_data());

    for (size_t i = 0; i < top.size(); ++i)
      data_top.push_back(top[i]->gpu_data());

    iface_.backward_gpu(libuserdata_, data_top, propagate_down, data_bottom);
  } else {
    // No GPU callback registered; fall back to the CPU path.
    Backward_cpu(top, propagate_down, bottom);
  }
}
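The pattern here is a plugin layer: iface_ looks like a table of optional callbacks supplied by an external library, with a null backward_gpu meaning no GPU path was registered. A hypothetical sketch consistent with the call site above (the struct and member names are illustrative, not from the source):

// Hypothetical callback table; a null backward_gpu means "no GPU path".
struct LibProcessInterface {
  void (*backward_gpu)(void* userdata,
                       const std::vector<const void*>& top,
                       const std::vector<bool>& propagate_down,
                       const std::vector<void*>& bottom);
};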
Example #6
template <typename Dtype>
void TanHLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down,
    vector<Blob<Dtype>*>* bottom) {
  if (propagate_down[0]) {
    const Dtype* top_data = top[0]->cpu_data();
    const Dtype* top_diff = top[0]->cpu_diff();
    Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff();
    const int count = (*bottom)[0]->count();

    // Delegates to an elementwise helper overload; the commented-out loop
    // below is the reference implementation it replaced.
    Backward_cpu(count, top_data, top_diff, bottom_diff);
    /*
    for (int i = 0; i < count; ++i) {
      Dtype tanhx = top_data[i];
      bottom_diff[i] = top_diff[i] * (1 - tanhx * tanhx);
    }
    */
  }
}
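The math preserved in the commented-out loop: since the derivative of tanh is 1 - tanh²(x), and top_data already holds tanh(x), the input gradient is top_diff[i] * (1 - top_data[i] * top_data[i]). A self-contained sketch of the elementwise helper the active call presumably delegates to (the free-function name is hypothetical):

// Elementwise tanh gradient: dL/dx = dL/dy * (1 - y*y), where y = tanh(x).
template <typename Dtype>
void tanh_backward_cpu(const int count, const Dtype* top_data,
                       const Dtype* top_diff, Dtype* bottom_diff) {
  for (int i = 0; i < count; ++i) {
    const Dtype y = top_data[i];
    bottom_diff[i] = top_diff[i] * (Dtype(1) - y * y);
  }
}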
Example #7
 /**
  * @brief Using the GPU device, compute the gradients for any parameters and
  *        for the bottom blobs if propagate_down is true.
  *        Fall back to Backward_cpu() if unavailable.
  */
 virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
     const vector<bool>& propagate_down,
     const vector<Blob<Dtype>*>& bottom) {
   // LOG(WARNING) << "Using CPU code as backup.";
   Backward_cpu(top, propagate_down, bottom);
 }
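This default in the Layer base class is what makes the fallback pattern work: a layer that only implements Backward_cpu still runs correctly in GPU mode, because the GPU branch of the dispatch in Examples #3 and #4 lands here and falls through to the CPU code. Examples #8 to #10 make the same fallback explicit in individual layers.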
Example #8
 virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
     const vector<bool>& propagate_down,
     const vector<Blob<Dtype>*>& bottom) {
   // No dedicated GPU kernel; reuse the CPU implementation.
   Backward_cpu(top, propagate_down, bottom);
 }
Example #9
template <typename Dtype>
void EuclideanLossHeatmapLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom)
{
    Backward_cpu(top, propagate_down, bottom);
}
Example #10
template <typename Dtype>
void MultiStageCRFLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
                                             const vector<bool>& propagate_down,
                                             const vector<Blob<Dtype>*>& bottom)
{
    Backward_cpu(top, propagate_down, bottom);
}