Code Example #1
File: test_lrn_layer.cpp Project: ChenglongChen/DSN
template <typename Dtype>
void LRNLayerTest<Dtype>::ReferenceLRNForward(
    const Blob<Dtype>& blob_bottom, const LayerParameter& layer_param,
    Blob<Dtype>* blob_top) {
  blob_top->Reshape(blob_bottom.num(), blob_bottom.channels(),
      blob_bottom.height(), blob_bottom.width());
  Dtype* top_data = blob_top->mutable_cpu_data();
  Dtype alpha = layer_param.alpha();
  Dtype beta = layer_param.beta();
  int size = layer_param.local_size();
  for (int n = 0; n < blob_bottom.num(); ++n) {
    for (int c = 0; c < blob_bottom.channels(); ++c) {
      for (int h = 0; h < blob_bottom.height(); ++h) {
        for (int w = 0; w < blob_bottom.width(); ++w) {
          int c_start = c - (size - 1) / 2;
          int c_end = min(c_start + size, blob_bottom.channels());
          c_start = max(c_start, 0);
          Dtype scale = 1.;
          for (int i = c_start; i < c_end; ++i) {
            Dtype value = blob_bottom.data_at(n, i, h, w);
            scale += value * value * alpha / size;
          }
          *(top_data + blob_top->offset(n, c, h, w)) =
            blob_bottom.data_at(n, c, h, w) / pow(scale, beta);
        }
      }
    }
  }
}
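For reference, the loop above computes, for every element, scale = 1 + (alpha / size) * sum of x_i^2 over a window of `size` neighboring channels, then divides the input by scale^beta. A self-contained sketch of the same arithmetic on a plain array (shapes and constants are illustrative, not taken from the example):

#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

int main() {
  const int channels = 5, size = 3;         // illustrative window
  const float alpha = 1e-4f, beta = 0.75f;  // common LRN defaults
  std::vector<float> x = {1, 2, 3, 4, 5};   // one pixel, across channels
  for (int c = 0; c < channels; ++c) {
    int c_start = c - (size - 1) / 2;
    int c_end = std::min(c_start + size, channels);
    c_start = std::max(c_start, 0);
    float scale = 1.f;
    for (int i = c_start; i < c_end; ++i)
      scale += x[i] * x[i] * alpha / size;
    std::printf("y[%d] = %g\n", c, x[c] / std::pow(scale, beta));
  }
  return 0;
}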
Code Example #2
File: blob.cpp Project: Vanova/caffe-compact
template <typename Dtype>
void Blob<Dtype>::CopyFrom(const Blob& source, bool copy_diff, bool reshape) {
  if (num_ != source.num() || channels_ != source.channels() ||
      height_ != source.height() || width_ != source.width()) {
    if (reshape) {
      Reshape(source.num(), source.channels(), source.height(), source.width());
    } else {
      LOG(FATAL) << "Trying to copy blobs of different sizes.";
    }
  }
  switch (Caffe::mode()) {
#if 0
  case Caffe::GPU:
    if (copy_diff) {
      CUDA_CHECK(cudaMemcpy(diff_->mutable_gpu_data(), source.gpu_diff(),
          sizeof(Dtype) * count_, cudaMemcpyDeviceToDevice));
    } else {
      CUDA_CHECK(cudaMemcpy(data_->mutable_gpu_data(), source.gpu_data(),
          sizeof(Dtype) * count_, cudaMemcpyDeviceToDevice));
    }
    break;
#endif
  case Caffe::CPU:
    if (copy_diff) {
      memcpy(diff_->mutable_cpu_data(), source.cpu_diff(),
          sizeof(Dtype) * count_);
    } else {
      memcpy(data_->mutable_cpu_data(), source.cpu_data(),
        sizeof(Dtype) * count_);
    }
    break;
  default:
    LOG(FATAL) << "Unknown caffe mode.";
  }
}
Code Example #3
template <typename Dtype>
void ConcatLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom) {
  const Dtype* top_diff = top[0]->cpu_diff();
  if (concat_dim_ == 0) {
    int offset_num = 0;
    for (int i = 0; i < bottom->size(); ++i) {
      Blob<Dtype>* blob = (*bottom)[i];
      if (propagate_down[i]) {
        Dtype* bottom_diff = blob->mutable_cpu_diff();
        caffe_copy(blob->count(), top_diff + top[0]->offset(offset_num),
                   bottom_diff);
      }
      offset_num += blob->num();
    }
  } else if (concat_dim_ == 1) {
    int offset_channel = 0;
    for (int i = 0; i < bottom->size(); ++i) {
      Blob<Dtype>* blob = (*bottom)[i];
      if (propagate_down[i]) {
        Dtype* bottom_diff = blob->mutable_cpu_diff();
        int num_elem = blob->channels()*blob->height()*blob->width();
        for (int n = 0; n < num_; ++n) {
          caffe_copy(num_elem, top_diff + top[0]->offset(n, offset_channel),
                     bottom_diff + blob->offset(n));
        }
      }
      offset_channel += blob->channels();
    }
  }  // concat_dim_ is guaranteed to be 0 or 1 by SetUp.
}
Code Example #4
File: blob.cpp Project: MaoXu/jade
template <typename Dtype>
void Blob<Dtype>::CopyFrom(const Blob& source, bool copy_diff, bool reshape) {
  if (num_ != source.num() || channels_ != source.channels() ||
      height_ != source.height() || width_ != source.width()) {
    if (reshape) {
      Reshape(source.num(), source.channels(), source.height(), source.width());
    } else {
      LOG(FATAL) << "Trying to copy blobs of different sizes.";
    }
  }
  switch (Caffe::mode()) {
  case Caffe::GPU:
    if (copy_diff) {
      caffe_copy(count_, source.gpu_diff(),
          static_cast<Dtype*>(diff_->mutable_gpu_data()));
    } else {
      caffe_copy(count_, source.gpu_data(),
          static_cast<Dtype*>(data_->mutable_gpu_data()));
    }
    break;
  case Caffe::CPU:
    if (copy_diff) {
      caffe_copy(count_, source.cpu_diff(),
          static_cast<Dtype*>(diff_->mutable_cpu_data()));
    } else {
      caffe_copy(count_, source.cpu_data(),
          static_cast<Dtype*>(data_->mutable_cpu_data()));
    }
    break;
  default:
    LOG(FATAL) << "Unknown caffe mode.";
  }
}
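A minimal call-site sketch for CopyFrom, assuming the usual Caffe headers and a float build (shapes illustrative):

Blob<float> src(2, 3, 4, 5);
Blob<float> dst;  // starts empty
// With reshape == true, CopyFrom resizes dst to match src;
// with reshape == false, a size mismatch triggers LOG(FATAL).
dst.CopyFrom(src, /*copy_diff=*/false, /*reshape=*/true);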
Code Example #5
File: slice_layer.cpp Project: FangZhenpeng/caffe
template <typename Dtype>
void SliceLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom) {
  if (!propagate_down[0]) { return; }
  Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff();
  if (slice_dim_ == 0) {
    int offset_num = 0;
    for (int i = 0; i < top.size(); ++i) {
      Blob<Dtype>* blob = top[i];
      const Dtype* top_diff = blob->cpu_diff();
      caffe_copy(blob->count(), top_diff,
                 bottom_diff + (*bottom)[0]->offset(offset_num));
      offset_num += blob->num();
    }
  } else if (slice_dim_ == 1) {
    int offset_channel = 0;
    for (int i = 0; i < top.size(); ++i) {
      Blob<Dtype>* blob = top[i];
      const Dtype* top_diff = blob->cpu_diff();
      const int num_elem = blob->channels() * blob->height() * blob->width();
      for (int n = 0; n < num_; ++n) {
        caffe_copy(num_elem, top_diff + blob->offset(n),
                   bottom_diff + (*bottom)[0]->offset(n, offset_channel));
      }
      offset_channel += blob->channels();
    }
  }  // slice_dim_ is guaranteed to be 0 or 1 by SetUp.
}
Code Example #6
File: slice_layer.cpp Project: FangZhenpeng/caffe
template <typename Dtype>
Dtype SliceLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top) {
  const Dtype* bottom_data = bottom[0]->cpu_data();
  if (slice_dim_ == 0) {
    int offset_num = 0;
    for (int i = 0; i < top->size(); ++i) {
      Blob<Dtype>* blob = (*top)[i];
      Dtype* top_data = blob->mutable_cpu_data();
      caffe_copy(blob->count(), bottom_data + bottom[0]->offset(offset_num),
                 top_data);
      offset_num += blob->num();
    }
  } else if (slice_dim_ == 1) {
    int offset_channel = 0;
    for (int i = 0; i < top->size(); ++i) {
      Blob<Dtype>* blob = (*top)[i];
      Dtype* top_data = blob->mutable_cpu_data();
      const int num_elem = blob->channels() * blob->height() * blob->width();
      for (int n = 0; n < num_; ++n) {
        caffe_copy(num_elem, bottom_data + bottom[0]->offset(n, offset_channel),
                   top_data + blob->offset(n));
      }
      offset_channel += blob->channels();
    }
  }  // slice_dim_ is guaranteed to be 0 or 1 by SetUp.
  return Dtype(0.);
}
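The slice_dim_ == 1 branch above copies num_elem = channels_i * height * width values per sample, advancing offset_channel by each top blob's channel count. A self-contained sketch of that copy pattern on flat arrays (shape illustrative: num = 2, channels = 4, H = W = 1, split into two 2-channel slices):

#include <cstdio>
#include <cstring>

int main() {
  const int num = 2, channels = 4, hw = 1;
  float bottom[num * channels * hw] = {0, 1, 2, 3, 4, 5, 6, 7};
  float top0[num * 2 * hw], top1[num * 2 * hw];
  for (int n = 0; n < num; ++n) {
    // offset_channel = 0 for top0, 2 for top1
    std::memcpy(top0 + n * 2 * hw, bottom + (n * channels + 0) * hw,
                2 * hw * sizeof(float));
    std::memcpy(top1 + n * 2 * hw, bottom + (n * channels + 2) * hw,
                2 * hw * sizeof(float));
  }
  std::printf("top1 = {%g, %g, %g, %g}\n", top1[0], top1[1], top1[2], top1[3]);
  // prints top1 = {2, 3, 6, 7}: channels 2-3 of each of the two samples
  return 0;
}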
Code Example #7
template <typename Dtype>
void LRNLayerTest<Dtype>::ReferenceLRNForward(
    const Blob<Dtype>& blob_bottom, const LayerParameter& layer_param,
    Blob<Dtype>* blob_top) {
  blob_top->Reshape(blob_bottom.num(), blob_bottom.channels(),
      blob_bottom.height(), blob_bottom.width());
  Dtype* top_data = blob_top->mutable_cpu_data();
  LRNParameter lrn_param = layer_param.lrn_param();
  Dtype alpha = lrn_param.alpha();
  Dtype beta = lrn_param.beta();
  int size = lrn_param.local_size();
  switch (lrn_param.norm_region()) {
  case LRNParameter_NormRegion_ACROSS_CHANNELS:
    for (int n = 0; n < blob_bottom.num(); ++n) {
      for (int c = 0; c < blob_bottom.channels(); ++c) {
        for (int h = 0; h < blob_bottom.height(); ++h) {
          for (int w = 0; w < blob_bottom.width(); ++w) {
            int c_start = c - (size - 1) / 2;
            int c_end = min(c_start + size, blob_bottom.channels());
            c_start = max(c_start, 0);
            Dtype scale = 1.;
            for (int i = c_start; i < c_end; ++i) {
              Dtype value = blob_bottom.data_at(n, i, h, w);
              scale += value * value * alpha / size;
            }
            *(top_data + blob_top->offset(n, c, h, w)) =
              blob_bottom.data_at(n, c, h, w) / pow(scale, beta);
          }
        }
      }
    }
    break;
  case LRNParameter_NormRegion_WITHIN_CHANNEL:
    for (int n = 0; n < blob_bottom.num(); ++n) {
      for (int c = 0; c < blob_bottom.channels(); ++c) {
        for (int h = 0; h < blob_bottom.height(); ++h) {
          int h_start = h - (size - 1) / 2;
          int h_end = min(h_start + size, blob_bottom.height());
          h_start = max(h_start, 0);
          for (int w = 0; w < blob_bottom.width(); ++w) {
            Dtype scale = 1.;
            int w_start = w - (size - 1) / 2;
            int w_end = min(w_start + size, blob_bottom.width());
            w_start = max(w_start, 0);
            for (int nh = h_start; nh < h_end; ++nh) {
              for (int nw = w_start; nw < w_end; ++nw) {
                Dtype value = blob_bottom.data_at(n, c, nh, nw);
                scale += value * value * alpha / (size * size);
              }
            }
            *(top_data + blob_top->offset(n, c, h, w)) =
              blob_bottom.data_at(n, c, h, w) / pow(scale, beta);
          }
        }
      }
    }
    break;
  default:
    LOG(FATAL) << "Unknown normalization region.";
  }
}
Code Example #8
TYPED_TEST(MultipleInnerProductLayerTest, TestSetup) {
  typedef typename TypeParam::Dtype Dtype;
  LayerParameter layer_param;
  MultipleInnerProductParameter* mip_param = layer_param.mutable_multiple_inner_product_param();
  mip_param->set_num_layer(3);

  mip_param->add_num_outputs(NUM_OUT1);
  mip_param->add_num_outputs(NUM_OUT2);
  mip_param->add_num_outputs(NUM_OUT3);

  MultipleInnerProductLayer<Dtype> layer(layer_param);
  layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
  for (int i = 0; i < this->blob_top_vec_.size(); ++i) {
    Blob<Dtype>* blob = this->blob_top_vec_[i];
    EXPECT_EQ(blob->num(), NUM);
    EXPECT_EQ(blob->channels(), NUM_OUT3);
    EXPECT_EQ(blob->height(), 1);
    EXPECT_EQ(blob->width(), 1);
  }
  EXPECT_EQ(layer.blobs().size(), 3 * 2);
  EXPECT_EQ(layer.blobs()[0]->shape()[0], NUM_OUT1);
  EXPECT_EQ(layer.blobs()[2]->shape()[0], NUM_OUT2);
  EXPECT_EQ(layer.blobs()[4]->shape()[0], NUM_OUT3);

}
Code Example #9
TYPED_TEST(DataTransformTest, TestCropSize) {
  TransformationParameter transform_param;
  const bool unique_pixels = false;  // all pixels the same equal to label
  const int_tp label = 0;
  const int_tp channels = 3;
  const int_tp height = 4;
  const int_tp width = 5;
  const int_tp crop_size = 2;

  transform_param.set_crop_size(crop_size);
  Datum datum;
  FillDatum(label, channels, height, width, unique_pixels, &datum);
  DataTransformer<TypeParam>* transformer =
      new DataTransformer<TypeParam>(transform_param, TEST,
                                     Caffe::GetDefaultDevice());
  transformer->InitRand();
  Blob<TypeParam>* blob =
      new Blob<TypeParam>(1, channels, crop_size, crop_size);
  for (int_tp iter = 0; iter < this->num_iter_; ++iter) {
    transformer->Transform(datum, blob);
    EXPECT_EQ(blob->num(), 1);
    EXPECT_EQ(blob->channels(), datum.channels());
    EXPECT_EQ(blob->height(), crop_size);
    EXPECT_EQ(blob->width(), crop_size);
    for (int_tp j = 0; j < blob->count(); ++j) {
      EXPECT_EQ(blob->cpu_data()[j], label);
    }
  }
}
Code Example #10
template <typename Dtype>
void reducedRTFLayer<Dtype>::FillBlob(Blob<Dtype>& toFill, bool isRand, Dtype fillerConstant) {

    int N,C,H,W;

    N = toFill.num();
    C = toFill.channels();
    H = toFill.height();
    W = toFill.width();
    Dtype* toFillPtr = toFill.mutable_cpu_data();
    for(int n=0; n<N; ++n){
        for(int c=0; c<C; ++c){
            for(int h=0; h<H; ++h){
                for(int w=0; w<W; ++w){
                    if(isRand){
                        toFillPtr[n*C*H*W + c*H*W + h*W + w] = static_cast<Dtype>(rand()) / RAND_MAX;  // cast first: rand()/RAND_MAX is integer division and almost always 0
                    }
                    else{
                        toFillPtr[n*C*H*W + c*H*W + h*W + w] = fillerConstant;
                    }
                }
            }
        }
    }

}
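The assignment above originally read (Dtype)(rand()/RAND_MAX), which divides two ints; the fix casts before dividing. A self-contained alternative using <random>, assuming uniform values in [0, 1) are what is wanted:

#include <random>
#include <vector>

// Fills a flat N*C*H*W buffer with uniform values in [0, 1).
void FillUniform(std::vector<float>& buf, unsigned seed = 42) {
  std::mt19937 gen(seed);
  std::uniform_real_distribution<float> dist(0.f, 1.f);
  for (float& v : buf) v = dist(gen);
}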
Code Example #11
template <typename TypeParam>
void HDF5OutputLayerTest<TypeParam>::CheckBlobEqual(const Blob<Dtype>& b1,
                                                    const Blob<Dtype>& b2) {
  EXPECT_EQ(b1.num(), b2.num());
  EXPECT_EQ(b1.channels(), b2.channels());
  EXPECT_EQ(b1.height(), b2.height());
  EXPECT_EQ(b1.width(), b2.width());
  for (int n = 0; n < b1.num(); ++n) {
    for (int c = 0; c < b1.channels(); ++c) {
      for (int h = 0; h < b1.height(); ++h) {
        for (int w = 0; w < b1.width(); ++w) {
          EXPECT_EQ(b1.data_at(n, c, h, w), b2.data_at(n, c, h, w));
        }
      }
    }
  }
}
Code Example #12
// Load the mean file in binaryproto format.
int DeepFeatureExtractor::SetMean(
  const std::string& meanfile) 
{
  BlobProto blob_proto;
  ReadProtoFromBinaryFileOrDie(meanfile.c_str(), &blob_proto);
  // Convert from BlobProto to Blob<float> 
  Blob<float> meanblob;
  meanblob.FromProto(blob_proto);
  CHECK_EQ(meanblob.channels(), m_num_channels)
    << "Number of channels of mean file doesn't match input layer.";
  // The format of the mean file is planar 32-bit float BGR or grayscale.
  std::vector<cv::Mat> channels;
  float* data = meanblob.mutable_cpu_data();
  for (unsigned int i = 0; i < m_num_channels; ++i) {
    // Extract an individual channel.
    cv::Mat channel(meanblob.height(), meanblob.width(), CV_32FC1, data);
    channels.push_back(channel);
    data += meanblob.height() * meanblob.width();
  }
  // Merge the separate channels into a single image.
  cv::Mat mean;
  cv::merge(channels, mean);
  // Compute the global mean pixel value and create a mean image
  // filled with this value. 
  cv::Scalar channel_mean = cv::mean(mean);
  m_mean = cv::Mat(m_input_geometry, mean.type(), channel_mean);
  return 0;
}
Code Example #13
void PoolingLayerImpl::avePooling_cpu(Blob &src, Blob &dst)
{
    for (int n = 0; n < src.num(); ++n)
    {
        for (int c = 0; c < src.channels(); ++c)
        {
            const float *srcData = src.ptrf(n, c);
            float *dstData = dst.ptrf(n, c);

            for (int ph = 0; ph < out.height; ++ph)
            {
                for (int pw = 0; pw < out.width; ++pw)
                {
                    int hstart = ph * stride.height - pad.height;
                    int wstart = pw * stride.width - pad.width;
                    int hend = min(hstart + kernel.height, inp.height + pad.height);
                    int wend = min(wstart + kernel.width, inp.width + pad.width);
                    int poolSize = (hend - hstart) * (wend - wstart);
                    hstart = max(hstart, 0);
                    wstart = max(wstart, 0);
                    hend = min(hend, inp.height);
                    wend = min(wend, inp.width);

                    dstData[ph * out.width + pw] = 0.f;

                    for (int h = hstart; h < hend; ++h)
                        for (int w = wstart; w < wend; ++w)
                            dstData[ph * out.width + pw] += srcData[h * inp.width + w];

                    dstData[ph * out.width + pw] /= poolSize;
                }
            }
        }
    }
}
Code Example #14
template <typename Dtype>
void ConcatLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
      const bool propagate_down, vector<Blob<Dtype>*>* bottom) {
  const Dtype* top_diff = top[0]->cpu_diff();
  if (concat_dim_ == 0) {
    int offset_num = 0;
    for (int i = 0; i < bottom->size(); ++i) {
      Blob<Dtype>* blob = (*bottom)[i];
      Dtype* bottom_diff = blob->mutable_cpu_diff();
      caffe_copy(blob->count(),
        top_diff+top[0]->offset(offset_num), bottom_diff);
      offset_num += blob->num();
    }
  } else if (concat_dim_ == 1) {
    int offset_channel = 0;
    for (int i = 0; i < bottom->size(); ++i) {
      Blob<Dtype>* blob = (*bottom)[i];
      Dtype* bottom_diff = blob->mutable_cpu_diff();
      int num_elem = blob->channels()*blob->height()*blob->width();
      for (int n = 0; n < num_; ++n) {
        caffe_copy(num_elem, top_diff+top[0]->offset(n, offset_channel),
          bottom_diff+blob->offset(n));
      }
      offset_channel += blob->channels();
    }
  } else if (concat_dim_ == 4) {  // lipengyu add
    int top_bias = 0;
    for (int n = 0; n < num_; ++n) {
      for (int i = 0; i < bottom->size(); ++i) {
        Blob<Dtype>* blob = (*bottom)[i];
        Dtype* bottom_diff = blob->mutable_cpu_diff();
        int num_elem = blob->channels() * blob->height() * blob->width();
        caffe_copy(num_elem, top_diff + top_bias,
                   bottom_diff + blob->offset(n));
        top_bias += num_elem;
      }
    }
  } else {
    LOG(FATAL) << "concat_dim along dim " << concat_dim_
               << " not implemented yet";
  }
}
Code Example #15
template <typename TypeParam>
void WordvecLayerTest<TypeParam>::ReferenceWordvecForward(
    const Blob<Dtype>& blob_bottom, const LayerParameter& layer_param,
    Blob<Dtype>* blob_top) {
  typedef typename TypeParam::Dtype Dtype;
  blob_top->Reshape(blob_bottom.num(), blob_bottom.channels(),
      blob_bottom.height(), blob_bottom.width());
  Dtype* top_data = blob_top->mutable_cpu_data();
  WordvecParameter wordvec_param = layer_param.wordvec_param();
}
Code Example #16
template <typename Dtype>
void SliceLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top) {
  count_ = 0;
  num_ = bottom[0]->num();
  channels_ = bottom[0]->channels();
  height_ = bottom[0]->height();
  width_ = bottom[0]->width();
  depth_ = bottom[0]->depth();
  LOG(INFO) << "slice start reshape";
  if (slice_point_.size() != 0) {
    CHECK_EQ(slice_point_.size(), top->size() - 1);
    if (slice_dim_ == 0) {
      CHECK_LE(top->size(), num_);
    } else {
      CHECK_LE(top->size(), channels_);
    }
    int prev = 0;
    vector<int> slices;
    for (int i = 0; i < slice_point_.size(); ++i) {
      CHECK_GT(slice_point_[i], prev);
      slices.push_back(slice_point_[i] - prev);
      prev = slice_point_[i];
    }
    if (slice_dim_ == 0) {
      slices.push_back(num_ - prev);
      for (int i = 0; i < top->size(); ++i) {
        (*top)[i]->Reshape(slices[i], channels_, height_, width_, depth_);
        count_ += (*top)[i]->count();
      }
    } else {
      slices.push_back(channels_ - prev);
      for (int i = 0; i < top->size(); ++i) {
        (*top)[i]->Reshape(num_, slices[i], height_, width_, depth_);
        count_ += (*top)[i]->count();
      }
    }
  } else {
    if (slice_dim_ == 0) {
      CHECK_EQ(num_ % top->size(), 0)
          << "Number of top blobs (" << top->size() << ") "
          << "should evenly divide input num ( " << num_ << ")";
      num_ = num_ / top->size();
    } else {
      CHECK_EQ(channels_ % top->size(), 0)
          << "Number of top blobs (" << top->size() << ") "
          << "should evenly divide input channels ( " << channels_ << ")";
      channels_ = channels_ / top->size();
    }
    for (int i = 0; i < top->size(); ++i) {
      (*top)[i]->Reshape(num_, channels_, height_, width_, depth_);
      count_ += (*top)[i]->count();
    }
  }
  CHECK_EQ(count_, bottom[0]->count());
  Blob<Dtype>* blob = (*top)[0];
  LOG(INFO)<<"top(0) num =" <<blob->num() << "channels ="<<blob->channels();
}
Code Example #17
File: io.cpp Project: MichaelXin/ARCH2
template <>
void hdf5_save_nd_dataset<double>(
    const hid_t file_id, const string& dataset_name, const Blob<double>& blob) {
  hsize_t dims[HDF5_NUM_DIMS];
  dims[0] = blob.num();
  dims[1] = blob.channels();
  dims[2] = blob.height();
  dims[3] = blob.width();
  herr_t status = H5LTmake_dataset_double(
      file_id, dataset_name.c_str(), HDF5_NUM_DIMS, dims, blob.cpu_data());
  CHECK_GE(status, 0) << "Failed to make double dataset " << dataset_name;
}
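A minimal usage sketch, assuming an HDF5 build with the high-level H5LT API available and a filled Blob<double> named blob (the file and dataset names are illustrative):

hid_t file_id = H5Fcreate("blob.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
hdf5_save_nd_dataset<double>(file_id, "data", blob);
H5Fclose(file_id);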
Code Example #18
    void ConvolutionLayer::computeInpOutShape(const Blob &inpBlob)
    {
        inpH = inpBlob.rows();
        inpW = inpBlob.cols();
        inpCn = inpBlob.channels();

        outH = (inpH + 2 * padH - kerH) / strideH + 1;
        outW = (inpW + 2 * padW - kerW) / strideW + 1;
        outCn = numOutput;

        topH = outH; topW = outW; topCn = outCn;
    }
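Plugging illustrative numbers (not from the source) into the forward shape formula: inpH = 224, padH = 3, kerH = 7, strideH = 2 gives outH = (224 + 6 - 7) / 2 + 1 = 112 under integer division:

#include <cstdio>

int main() {
  const int inpH = 224, padH = 3, kerH = 7, strideH = 2;  // illustrative
  const int outH = (inpH + 2 * padH - kerH) / strideH + 1;
  std::printf("outH = %d\n", outH);  // prints 112
  return 0;
}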
Code Example #19
    void DeConvolutionLayer::computeInpOutShape(const Blob &inpBlob)
    {
        outH = inpBlob.rows();
        outW = inpBlob.cols();
        outCn = inpBlob.channels();

        inpH = strideH * (outH - 1) + kerH - 2 * padH;
        inpW = strideW * (outW - 1) + kerW - 2 * padW;
        inpCn = numOutput;

        topH = inpH; topW = inpW; topCn = inpCn;
    }
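The deconvolution version inverts that mapping. With the same illustrative numbers it recovers 223 rather than 224, since the forward pass's integer division discarded a remainder:

#include <cstdio>

int main() {
  const int outH = 112, padH = 3, kerH = 7, strideH = 2;  // illustrative
  const int inpH = strideH * (outH - 1) + kerH - 2 * padH;
  std::printf("inpH = %d\n", inpH);  // prints 223
  return 0;
}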
Code Example #20
static void colorizeSegmentation(Blob &score, Mat &segm, Mat &legend, vector<String> &classNames)
{
    const int rows = score.rows();
    const int cols = score.cols();
    const int chns = score.channels();

    vector<Vec3i> colors;
    RNG rng(12345678);

    cv::Mat maxCl = cv::Mat::zeros(rows, cols, CV_8UC1);
    // Initialize to -FLT_MAX so the first channel's scores always win the
    // comparison below; the original left maxVal uninitialized.
    cv::Mat maxVal(rows, cols, CV_32FC1, cv::Scalar(-FLT_MAX));
    for (int ch = 0; ch < chns; ch++)
    {
        colors.push_back(Vec3i(rng.uniform(0, 256), rng.uniform(0, 256), rng.uniform(0, 256)));
        for (int row = 0; row < rows; row++)
        {
            const float *ptrScore = score.ptrf(0, ch, row);
            uchar *ptrMaxCl = maxCl.ptr<uchar>(row);
            float *ptrMaxVal = maxVal.ptr<float>(row);
            for (int col = 0; col < cols; col++)
            {
                if (ptrScore[col] > ptrMaxVal[col])
                {
                    ptrMaxVal[col] = ptrScore[col];
                    ptrMaxCl[col] = ch;
                }
            }
        }
    }

    segm.create(rows, cols, CV_8UC3);
    for (int row = 0; row < rows; row++)
    {
        const uchar *ptrMaxCl = maxCl.ptr<uchar>(row);
        cv::Vec3b *ptrSegm = segm.ptr<cv::Vec3b>(row);
        for (int col = 0; col < cols; col++)
        {
            ptrSegm[col] = colors[ptrMaxCl[col]];
        }
    }

    if (classNames.size() == colors.size())
    {
        int blockHeight = 30;
        legend.create(blockHeight*classNames.size(), 200, CV_8UC3);
        for(int i = 0; i < classNames.size(); i++)
        {
            cv::Mat block = legend.rowRange(i*blockHeight, (i+1)*blockHeight);
            block = colors[i];
            putText(block, classNames[i], Point(0, blockHeight/2), FONT_HERSHEY_SIMPLEX, 0.5, Scalar());
        }
    }
}
Code Example #21
File: blob.cpp Project: ZhitingHu/NN
template <typename Dtype>
void Blob<Dtype>::CopyFrom(const Blob& source, bool copy_diff, bool reshape) {
  if (blob_mode_ == BlobProto_BlobMode_GLOBAL) {
    if (!copy_diff) {
      LOG(FATAL) << "Currently Petuum Caffe does not support "
                 << "copying data to blobs with GLOBAL mode";
    } // TODO: support CopyFrom( copy_diff == false )
  }
  if (num_ != source.num() || channels_ != source.channels() ||
      height_ != source.height() || width_ != source.width()) {
    if (reshape) {
      Reshape(source.num(), source.channels(), source.height(), source.width());
    } else {
      LOG(FATAL) << "Trying to copy blobs of different sizes.";
    }
  }
  switch (Caffe::mode()) {
  case Caffe::GPU:
    if (copy_diff) {
      caffe_copy(count_, source.gpu_diff(),
          static_cast<Dtype*>(diff_->mutable_gpu_data()));
    } else {
      caffe_copy(count_, source.gpu_data(),
          static_cast<Dtype*>(data_->mutable_gpu_data()));
    }
    break;
  case Caffe::CPU:
    if (copy_diff) {
      caffe_copy(count_, source.cpu_diff(),
          static_cast<Dtype*>(diff_->mutable_cpu_data()));
    } else {
      caffe_copy(count_, source.cpu_data(),
          static_cast<Dtype*>(data_->mutable_cpu_data()));
    }
    break;
  default:
    LOG(FATAL) << "Unknown caffe mode.";
  }
}
Code Example #22
File: infogain_loss_layer.cpp Project: xygorn/caffe
template <typename Dtype>
void InfogainLossLayer<Dtype>::Reshape(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
    LossLayer<Dtype>::Reshape(bottom, top);
    Blob<Dtype>* infogain = NULL;
    if (bottom.size() < 3) {
        infogain = &infogain_;
    } else {
        infogain = bottom[2];
    }
    CHECK_EQ(bottom[1]->count(1), 1);
    const int num = bottom[0]->num();
    const int dim = bottom[0]->count() / num;
    CHECK_EQ(infogain->num(), 1);
    CHECK_EQ(infogain->channels(), 1);
    CHECK_EQ(infogain->height(), dim);
    CHECK_EQ(infogain->width(), dim);
}
Code Example #23
template <typename Dtype>
cv::Mat Blob2Mat(Blob<Dtype>& data_mean) {
  const Dtype* mean = data_mean.cpu_data();

  const int height = data_mean.height();
  const int width = data_mean.width();
  const int channels = data_mean.channels();

  CHECK_EQ(channels, 3);
  cv::Mat M(height, width, CV_32FC3);
  // Caffe blobs are planar (CHW); cv::Vec3f pixels are interleaved (HWC).
  for (int c = 0; c < channels; ++c) {
    for (int h = 0; h < height; ++h) {
      for (int w = 0; w < width; ++w) {
        M.at<cv::Vec3f>(h, w)[c] = static_cast<float>(mean[(c * height + h) * width + w]);
      }
    }
  }
  LOG(ERROR) << "ret[0][0]: " << M.at<cv::Vec3f>(0, 0)[0] << " mean[0]: " << mean[0];

  return M;
}
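The index (c * height + h) * width + w walks the blob's planar CHW layout, while cv::Vec3f's [c] indexes an interleaved pixel; a self-contained check of that mapping (sizes illustrative):

#include <cstdio>

int main() {
  const int C = 3, H = 2, W = 2;
  float planar[C * H * W];
  for (int i = 0; i < C * H * W; ++i) planar[i] = static_cast<float>(i);
  // Gather every channel for each (h, w), as the conversion loop does.
  for (int h = 0; h < H; ++h)
    for (int w = 0; w < W; ++w)
      for (int c = 0; c < C; ++c)
        std::printf("h=%d w=%d c=%d -> %g\n", h, w, c,
                    planar[(c * H + h) * W + w]);
  return 0;
}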
Code Example #24
TYPED_TEST(MultipleInnerProductLayerTest, MultipleInnerProductLayerTestLoadWeightsFromFile) {
  typedef typename TypeParam::Dtype Dtype;
  
  const int num_layer = 5;
  const int num_outputs[] = {3,4,5,6,7};

  LayerParameter layer_param;
  MultipleInnerProductParameter* mip_param = layer_param.mutable_multiple_inner_product_param();
  mip_param->set_num_layer(num_layer);
  for (int i = 0; i < num_layer; ++i){
    mip_param->add_num_outputs(num_outputs[i]);
  }

  MultipleInnerProductLayer<Dtype> layer(layer_param);

  
  vector<int> weight_shape(2, 0);
  weight_shape[0] = CHENNAL * HEIGHT * WIDTH;  // input dimension of the first inner product
  vector<int> bias_shape(1, 0);
  layer.blobs().resize(num_layer * 2);  // one weight blob and one bias blob per layer
  for (int i = 0; i < num_layer; ++i) {
    weight_shape[1] = weight_shape[0];
    weight_shape[0] = num_outputs[i];
    bias_shape[0] = num_outputs[i];
    layer.blobs()[i * 2].reset(new Blob<Dtype>(weight_shape));
    layer.blobs()[i * 2 + 1].reset(new Blob<Dtype>(bias_shape));
  }
  layer.Reshape(this->blob_bottom_vec_, this->blob_top_vec_);

  Blob<Dtype>* blob = this->blob_top_vec_[0];
  EXPECT_EQ(blob->num(), NUM);
  EXPECT_EQ(blob->channels(), num_outputs[num_layer - 1]);
  EXPECT_EQ(blob->height(), 1);
  EXPECT_EQ(blob->width(), 1);

  layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
}
Code Example #25
void PoolingLayerImpl::maxPooling_cpu(Blob &src, Blob &dst)
{
    CV_DbgAssert(dst.rows() == out.height && dst.cols() == out.width);

    for (int n = 0; n < src.num(); ++n)
    {
        for (int c = 0; c < src.channels(); ++c)
        {
            const float *srcData = src.ptrf(n, c);
            float *dstData = dst.ptrf(n, c);

            for (int ph = 0; ph < out.height; ++ph)
            {
                for (int pw = 0; pw < out.width; ++pw)
                {
                    int hstart = ph * stride.height - pad.height;
                    int wstart = pw * stride.width - pad.width;
                    int hend = min(hstart + kernel.height, inp.height);
                    int wend = min(wstart + kernel.width, inp.width);
                    hstart = max(hstart, 0);
                    wstart = max(wstart, 0);
                    const int poolIndex = ph * out.width + pw;
                    float max_val = -FLT_MAX;

                    for (int h = hstart; h < hend; ++h)
                        for (int w = wstart; w < wend; ++w)
                        {
                            const int index = h * inp.width + w;
                            if (srcData[index] > max_val)
                                max_val = srcData[index];
                        }

                    dstData[poolIndex] = max_val;
                }
            }
        }
    }
}
Code Example #26
TYPED_TEST(DataTransformTest, TestEmptyTransformUniquePixels) {
  TransformationParameter transform_param;
  const bool unique_pixels = true;  // pixels are consecutive ints [0,size]
  const int label = 0;
  const int channels = 3;
  const int height = 4;
  const int width = 5;

  Datum datum;
  FillDatum(label, channels, height, width, unique_pixels, &datum);
  Blob<TypeParam>* blob = new Blob<TypeParam>(1, 3, 4, 5);
  DataTransformer<TypeParam>* transformer =
      new DataTransformer<TypeParam>(transform_param, TEST);
  transformer->InitRand();
  transformer->Transform(datum, blob);
  EXPECT_EQ(blob->num(), 1);
  EXPECT_EQ(blob->channels(), datum.channels());
  EXPECT_EQ(blob->height(), datum.height());
  EXPECT_EQ(blob->width(), datum.width());
  for (int j = 0; j < blob->count(); ++j) {
    EXPECT_EQ(blob->cpu_data()[j], j);
  }
}
Code Example #27
TYPED_TEST(DataTransformTest, TestEmptyTransform) {
  TransformationParameter transform_param;
  const bool unique_pixels = false;  // all pixels the same equal to label
  const int label = 0;
  const int channels = 3;
  const int height = 4;
  const int width = 5;

  Datum datum;
  FillDatum(label, channels, height, width, unique_pixels, &datum);
  Blob<TypeParam>* blob = new Blob<TypeParam>(1, channels, height, width);
  DataTransformer<TypeParam>* transformer =
      new DataTransformer<TypeParam>(transform_param, TEST);
  transformer->InitRand();
  transformer->Transform(datum, blob);
  EXPECT_EQ(blob->num(), 1);
  EXPECT_EQ(blob->channels(), datum.channels());
  EXPECT_EQ(blob->height(), datum.height());
  EXPECT_EQ(blob->width(), datum.width());
  for (int j = 0; j < blob->count(); ++j) {
    EXPECT_EQ(blob->cpu_data()[j], label);
  }
}
Code Example #28
File: blob.cpp Project: MaoXu/jade
template <typename Dtype>
void Blob<Dtype>::ReshapeLike(const Blob<Dtype>& other) {
  Reshape(other.num(), other.channels(), other.height(), other.width());
}
Code Example #29
int main(int argc, char** argv)
{

//	cudaSetDevice(0);
    Caffe::set_phase(Caffe::TEST);
//	Caffe::SetDevice(3);
    //Caffe::set_mode(Caffe::CPU);

    if (argc == 8 && strcmp(argv[7], "CPU") == 0) {
//		LOG(ERROR) << "Using CPU";
//		Caffe::set_mode(Caffe::CPU);
    } else {
//		LOG(ERROR) << "Using GPU";
//		Caffe::set_mode(Caffe::GPU);
    }

    Caffe::set_mode(Caffe::CPU);
    NetParameter test_net_param;
    ReadProtoFromTextFile(argv[1], &test_net_param);
    Net<float> caffe_test_net(test_net_param);
    NetParameter trained_net_param;
    ReadProtoFromBinaryFile(argv[2], &trained_net_param);
    caffe_test_net.CopyTrainedLayersFrom(trained_net_param);

    vector<shared_ptr<Layer<float> > > layers = caffe_test_net.layers();

    string labelFile(argv[3]);
    int data_counts = 0;
    char buf[100000];  // line buffer (declaration was missing in the original)
    FILE * file = fopen(labelFile.c_str(), "r");
    while (fgets(buf, sizeof(buf), file) != NULL)
    {
        data_counts++;
    }
    fclose(file);

    vector<Blob<float>*> dummy_blob_input_vec;
    string rootfolder(argv[4]);
    rootfolder.append("/");
    CreateDir(rootfolder.c_str(), rootfolder.size() - 1);
    string folder;
    string fName;

    float output;
    int counts = 0;

    file = fopen(labelFile.c_str(), "r");

    Blob<float>* c1 = (*(caffe_test_net.bottom_vecs().rbegin()))[0];
    int c2 = c1->num();  // batch size of the net's last bottom blob
    int batchCount = std::ceil(data_counts / static_cast<double>(c2));

    string resulttxt = rootfolder + "segResult.txt";
    FILE * resultfile = fopen(resulttxt.c_str(), "w");

    for (int batch_id = 0; batch_id < batchCount; ++batch_id)
    {
        LOG(INFO)<< "processing batch :" << batch_id+1 << "/" << batchCount <<"...";

        const vector<Blob<float>*>& result = caffe_test_net.Forward(dummy_blob_input_vec);
        Blob<float>* bboxs = (*(caffe_test_net.bottom_vecs().rbegin()))[0];
        int bsize = bboxs->num();

        const Blob<float>* labels = (*(caffe_test_net.bottom_vecs().rbegin()))[1];
        for (int i = 0; i < bsize && counts < data_counts; i++, counts++)
        {
            char fname[1010];
            fscanf(file, "%s", fname);
            /*for(int w = 0; w < LABEL_SIZE; w ++)
            	for(int h = 0; h < LABEL_SIZE; h ++)
            	{
            		int lbl;
            		fscanf(file,"%d",&lbl);
            	}*/
            fprintf(resultfile, "%s ", fname);
            int len =  bboxs->channels();
            for(int j = 0; j < len; j ++)
            {
                fprintf(resultfile, "%f ", (float)(bboxs->data_at(i, j, 0, 0)) );
            }
            fprintf(resultfile, "\n");
        }
    }

    fclose(resultfile);
    fclose(file);


    return 0;
}
Code Example #30
template <typename Dtype>
void CompactDataLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top) {
  if (top->size() == 1) {
    this->output_labels_ = false;
  } else {
    this->output_labels_ = true;
  }
  DataLayerSetUp(bottom, top);
  // The subclasses should setup the datum channels, height and width
  CHECK_GT(this->datum_channels_, 0);
  CHECK_GT(this->datum_height_, 0);
  CHECK_GT(this->datum_width_, 0);
  CHECK(this->transform_param_.crop_size() > 0);
  CHECK_GE(this->datum_height_, this->transform_param_.crop_size());
  CHECK_GE(this->datum_width_, this->transform_param_.crop_size());
  int crop_size = this->transform_param_.crop_size();

  // check if we want to have mean
  if (transform_param_.has_mean_file()) {
	  //CHECK(this->transform_param_.has_mean_file());
	  this->data_mean_.Reshape(1, this->datum_channels_, crop_size, crop_size);
	  const string& mean_file = this->transform_param_.mean_file();
	  LOG(INFO) << "Loading mean file from" << mean_file;
	  BlobProto blob_proto;
	  ReadProtoFromBinaryFileOrDie(mean_file.c_str(), &blob_proto);
	  this->data_mean_.FromProto(blob_proto);
	  Blob<Dtype> tmp;
	  tmp.FromProto(blob_proto);
	  const Dtype* src_data = tmp.cpu_data();
	  Dtype* dst_data = this->data_mean_.mutable_cpu_data();
	  CHECK_EQ(tmp.num(), 1);
	  CHECK_EQ(tmp.channels(), this->datum_channels_);
	  CHECK_GE(tmp.height(), crop_size);
	  CHECK_GE(tmp.width(), crop_size);
	  int w_off = (tmp.width() - crop_size) / 2;
	  int h_off = (tmp.height() - crop_size) / 2;
	  for (int c = 0; c < this->datum_channels_; c++) {
		  for (int h = 0; h < crop_size; h++) {
			  for (int w = 0; w < crop_size; w++) {
				  int src_idx = (c * tmp.height() + h + h_off) * tmp.width() + w + w_off;
				  int dst_idx = (c * crop_size + h) * crop_size + w;
				  dst_data[dst_idx] = src_data[src_idx];
			  }
		  }
	  }
  } else {
	// Simply initialize an all-empty mean.
	this->data_mean_.Reshape(1, this->datum_channels_, crop_size, crop_size);
  }

  this->mean_ = this->data_mean_.cpu_data();
  this->data_transformer_.InitRand();

  this->prefetch_data_.mutable_cpu_data();
  if (this->output_labels_) {
    this->prefetch_label_.mutable_cpu_data();
  }
  DLOG(INFO) << "Initializing prefetch";
  this->CreatePrefetchThread();
  DLOG(INFO) << "Prefetch initialized.";
}