void ImageDataLayer<Dtype>::DataLayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  const int new_height = this->layer_param_.image_data_param().new_height();
  const int new_width  = this->layer_param_.image_data_param().new_width();
  const bool is_color  = this->layer_param_.image_data_param().is_color();
  string root_folder = this->layer_param_.image_data_param().root_folder();

  CHECK((new_height == 0 && new_width == 0) ||
      (new_height > 0 && new_width > 0)) << "Current implementation requires "
      "new_height and new_width to be set at the same time.";
  // Read the file with filenames and labels
  const string& source = this->layer_param_.image_data_param().source();
  LOG(INFO) << "Opening file " << source;
  std::ifstream infile(source.c_str());
  string filename;
  int label;
  while (infile >> filename >> label) {
    lines_.push_back(std::make_pair(filename, label));
  }

  if (this->layer_param_.image_data_param().shuffle()) {
    // randomly shuffle data
    LOG(INFO) << "Shuffling data";
    const unsigned int prefetch_rng_seed = caffe_rng_rand();
    prefetch_rng_.reset(new Caffe::RNG(prefetch_rng_seed));
    ShuffleImages();
  }
  LOG(INFO) << "A total of " << lines_.size() << " images.";

  lines_id_ = 0;
  // Check if we would need to randomly skip a few data points
  if (this->layer_param_.image_data_param().rand_skip()) {
    unsigned int skip = caffe_rng_rand() %
        this->layer_param_.image_data_param().rand_skip();
    LOG(INFO) << "Skipping first " << skip << " data points.";
    CHECK_GT(lines_.size(), skip) << "Not enough points to skip";
    lines_id_ = skip;
  }
  // Read an image, and use it to initialize the top blob.
  cv::Mat cv_img = ReadImageToCVMat(root_folder + lines_[lines_id_].first,
                                    new_height, new_width, is_color);
  CHECK(cv_img.data) << "Could not load " << lines_[lines_id_].first;
  // Use data_transformer to infer the expected blob shape from a cv_image.
  vector<int> top_shape = this->data_transformer_->InferBlobShape(cv_img);
  this->transformed_data_.Reshape(top_shape);
  // Reshape prefetch_data and top[0] according to the batch_size.
  const int batch_size = this->layer_param_.image_data_param().batch_size();
  CHECK_GT(batch_size, 0) << "Positive batch size required";
  top_shape[0] = batch_size;
  this->prefetch_data_.Reshape(top_shape);
  top[0]->ReshapeLike(this->prefetch_data_);

  LOG(INFO) << "output data size: " << top[0]->num() << ","
      << top[0]->channels() << "," << top[0]->height() << ","
      << top[0]->width();
  // label
  vector<int> label_shape(1, batch_size);
  top[1]->Reshape(label_shape);
  this->prefetch_label_.Reshape(label_shape);
}
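
The source listing this layer parses is plain text with one "image-path label" pair per line, each path resolved against root_folder. A minimal sketch of such a file (the file name and entries are hypothetical):

images/cat_0001.jpg 0
images/cat_0002.jpg 0
images/dog_0001.jpg 1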
 void InternalThreadEntry() {
   if (sizeof(uint_tp) == 4) {
     EXPECT_EQ(2682223724U, caffe_rng_rand());
   } else {
     EXPECT_EQ(10282592414170385089UL, caffe_rng_rand());
   }
 }
Example #3
void FloDataLayer<Dtype>::DataLayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  string root_folder = this->layer_param_.image_data_param().root_folder();

  // Read the file with filenames and labels
  const string& source = this->layer_param_.image_data_param().source();
  LOG(INFO) << "Opening file " << source;
  std::ifstream infile(source.c_str());
  string filename;
  while (infile >> filename) {
    lines_.push_back(std::make_pair(filename, 0));
  }

  if (this->layer_param_.image_data_param().shuffle()) {
    // randomly shuffle data
    LOG(INFO) << "Shuffling data";
    const unsigned int prefetch_rng_seed = caffe_rng_rand();
    prefetch_rng_.reset(new Caffe::RNG(prefetch_rng_seed));
    ShuffleImages();
  }
  LOG(INFO) << "A total of " << lines_.size() << " images.";

  lines_id_ = 0;
  // Check if we would need to randomly skip a few data points
  if (this->layer_param_.image_data_param().rand_skip()) {
    unsigned int skip = caffe_rng_rand() %
        this->layer_param_.image_data_param().rand_skip();
    LOG(INFO) << "Skipping first " << skip << " data points.";
    CHECK_GT(lines_.size(), skip) << "Not enough points to skip";
    lines_id_ = skip;
  }
  
  // Read one flow field and use it to initialize the top blob. Passing NULL
  // for the data pointer presumably makes readFloFile return only the
  // dimensions (xSize, ySize) without reading the flow data itself.
  int xSize, ySize;
  CHECK(readFloFile(root_folder + lines_[lines_id_].first, NULL, xSize, ySize))
      << "Could not load " << lines_[lines_id_].first;
  
  // Flow fields carry two channels (x and y displacement), so the blob shape
  // is built directly rather than inferred from a cv::Mat.
  vector<int> top_shape(4);
  top_shape[0] = 1;
  top_shape[1] = 2;
  top_shape[2] = ySize;
  top_shape[3] = xSize;
  
  this->transformed_data_.Reshape(top_shape);
  
  // Reshape prefetch_data and top[0] according to the batch_size.
  const int batch_size = this->layer_param_.image_data_param().batch_size();
  CHECK_GT(batch_size, 0) << "Positive batch size required";
  top_shape[0] = batch_size;
  for (int i = 0; i < this->PREFETCH_COUNT; ++i) {
    this->prefetch_[i].data_.Reshape(top_shape);
  }
  top[0]->Reshape(top_shape);

  LOG(INFO) << "output data size: " << top[0]->num() << ","
      << top[0]->channels() << "," << top[0]->height() << ","
      << top[0]->width();
  // No label top: this layer outputs only the flow data blob.
}
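
The .flo files read here presumably follow the Middlebury optical-flow format: a 4-byte float magic (202021.25), two int32 fields for width and height, then row-major interleaved (x, y) float pairs, which is why the blob above is given exactly two channels.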
 SemiLossLayerTest()
     : blob_bottom_data_(new Blob<Dtype>(25, 1, 1, 1)),
       blob_bottom_label_(new Blob<Dtype>(25, 1, 1, 1)),
       blob_top_loss_(new Blob<Dtype>()) {
   // fill the values
   Caffe::set_random_seed(1701);
   FillerParameter filler_param;
   filler_param.set_std(25);
   GaussianFiller<Dtype> filler(filler_param);
   filler.Fill(this->blob_bottom_data_);
   blob_bottom_vec_.push_back(blob_bottom_data_);
   for (int i = 0; i < 5; ++i) {
     blob_bottom_label_->mutable_cpu_data()[i] =
         (caffe_rng_rand() % 2) - 2;  // -2 or -1: positive or negative
   }
   for (int i = 5; i < 10; ++i) {
     blob_bottom_label_->mutable_cpu_data()[i] = 5600;  // weakly-labeled bag, image index = 0
   }
   for (int i = 10; i < 15; ++i) {
     blob_bottom_label_->mutable_cpu_data()[i] =
         (caffe_rng_rand() % 2) - 2;  // -2 or -1: positive or negative
   }
   for (int i = 15; i < 20; ++i) {
     blob_bottom_label_->mutable_cpu_data()[i] = 7562;  // weakly-labeled bag, image index = 1
   }
   for (int i = 20; i < 25; ++i) {
     blob_bottom_label_->mutable_cpu_data()[i] = (caffe_rng_rand() % 2) - 2;
   }
   blob_bottom_vec_.push_back(blob_bottom_label_);
   blob_top_vec_.push_back(blob_top_loss_);
 }
 void InternalThreadEntry() {
   if (sizeof(uint_tp) == 4) {
     EXPECT_EQ(887095485U, caffe_rng_rand());
   } else {
     EXPECT_EQ(10310463406559028313UL, caffe_rng_rand());
   }
 }
TYPED_TEST(ClusteringLayerTest, ClusteringLayerTestKmeansDominate) {
  typedef typename TypeParam::Dtype Dtype;
  LayerParameter layer_param;
  ClusteringParameter* clustering_layer_param = layer_param.mutable_clustering_param();
  int total_class = 2;
  int k = 3;
  clustering_layer_param->set_num_output(NUM_OUT);
  clustering_layer_param->set_total_class(total_class);
  clustering_layer_param->set_k(k);
  clustering_layer_param->set_branch(true); 
  clustering_layer_param->set_across_class(false); 
  clustering_layer_param->set_data_size(20); 
  clustering_layer_param->set_dominate(1); 

  ClusteringLayer<Dtype> layer(layer_param);
 
  // set data
  // this->blob_bottom1_ = new Blob<Dtype>(50, CHENNAL, HEIGHT, WIDTH);
  // this->blob_bottom2_ = new Blob<Dtype>(50, 1, 1, 1);
  for (int n = 0; n < NUM; ++n)
  {
    Dtype r = (caffe_rng_rand() % 3) * 10;  // base value: 0, 10, or 20
    this->blob_bottom2_->mutable_cpu_data()[n] = caffe_rng_rand() % 2;  // label
    if (this->blob_bottom2_->mutable_cpu_data()[n] == 1){
      r += 40;
    }
    for (int i = 0; i < CHENNAL * WIDTH * HEIGHT; ++i)
    {
      const int idx = n * CHENNAL * WIDTH * HEIGHT + i;
      Dtype noise = 3.0 * caffe_rng_rand() / UINT_MAX;
      this->blob_bottom1_->mutable_cpu_data()[idx] = r + noise;
    }
  }
  this->blob_bottom_vec_.resize(2);
  this->blob_bottom_vec_[0] = this->blob_bottom1_;
  this->blob_bottom_vec_[1] = this->blob_bottom2_;

  // do kmeans
  layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
  layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
  layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);

  // checking
  vector<shared_ptr<Blob<Dtype> > > & blobs = layer.blobs();
  EXPECT_EQ(blobs.size(), 1 * k * 3);

  // LOG(ERROR) << "Blob size: " << UINT_MAX;

  // LOG(ERROR) << "Blob size: " << blobs.size();
  for (int i = 1 * k * 2; i < blobs.size(); ++i)
  {
    LOG(ERROR) << this->blob_to_string(blobs[i].get());
  }
  
}
void RandomPerspectiveTransformImage(const cv::Mat& src_img,
    const int perspective_transformation_border,
    cv::Mat* dst_img) {
  CHECK_GE(perspective_transformation_border, 0);
  CHECK_LE(perspective_transformation_border, src_img.rows/2);
  CHECK_LE(perspective_transformation_border, src_img.cols/2);
  if (perspective_transformation_border == 0) {
    // Nothing to perturb, and the modulo below would divide by zero.
    src_img.copyTo(*dst_img);
    return;
  }
  cv::Point2f src_shape[4];
  src_shape[0] = cv::Point2f(0+
      caffe_rng_rand()%perspective_transformation_border,
      0+caffe_rng_rand()%perspective_transformation_border);
  src_shape[1] = cv::Point2f(0+
      caffe_rng_rand()%perspective_transformation_border,
      src_img.rows-caffe_rng_rand()%perspective_transformation_border);
  src_shape[2] = cv::Point2f(src_img.cols
      -caffe_rng_rand()%perspective_transformation_border,
      src_img.rows-caffe_rng_rand()%perspective_transformation_border);
  src_shape[3] = cv::Point2f(src_img.cols
      -caffe_rng_rand()%perspective_transformation_border,
      0+caffe_rng_rand()%perspective_transformation_border);
  cv::Point2f dst_shape[4];
  dst_shape[0] = cv::Point2f(0, 0);
  dst_shape[1] = cv::Point2f(0, src_img.rows);
  dst_shape[2] = cv::Point2f(src_img.cols, src_img.rows);
  dst_shape[3] = cv::Point2f(src_img.cols, 0);
  cv::Mat ptmatrix = cv::getPerspectiveTransform(src_shape, dst_shape);
  cv::warpPerspective(src_img, *dst_img, ptmatrix,
      cv::Size(src_img.cols, src_img.rows),
      cv::INTER_LINEAR, cv::BORDER_CONSTANT);
}
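
A minimal usage sketch (the input image and the 10-pixel border are hypothetical):

cv::Mat src = cv::imread("input.jpg");  // hypothetical input image
cv::Mat dst;
// Each source corner is jittered by up to 10 pixels and warped back onto the
// full rectangle, so repeated calls produce different random perspectives.
RandomPerspectiveTransformImage(src, 10, &dst);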
std::vector<typename TransformationLayer<Dtype>::Affine2D>
TransformationLayer<Dtype>::generate(int N, int W, int H, int Wo, int Ho) {
  std::vector<Affine2D> r;
  for (int i = 0; i < N; i++) {
    if (synchronized_ && i) {
      r.push_back(r.back());
    } else {
      Dtype rot = 0, scale = 1, sx = 0, sy = 0;
      if (rotate_) caffe_rng_uniform<Dtype>(1, -M_PI, M_PI, &rot);
      Dtype sin_rot = sin(rot);
      Dtype cos_rot = cos(rot);
      Dtype rot_w = Wo*fabs(cos_rot) + Ho*fabs(sin_rot);
      Dtype rot_h = Wo*fabs(sin_rot) + Ho*fabs(cos_rot);
      Dtype max_scale = std::min(max_scale_, std::min(H / rot_h, W / rot_w));
      Dtype min_scale = std::min(min_scale_, max_scale);
      caffe_rng_uniform<Dtype>(1, min_scale, max_scale, &scale);
      Dtype scl_w = std::min(rot_w * scale, Dtype(W));
      caffe_rng_uniform<Dtype>(1, -(W-scl_w)/2, (W-scl_w)/2, &sx);
      Dtype scl_h = std::min(rot_h * scale, Dtype(H));
      caffe_rng_uniform<Dtype>(1, -(H-scl_h)/2, (H-scl_h)/2, &sy);
      Affine2D T0(1, 0, 0, 1, -0.5*Wo, -0.5*Ho);
      Affine2D RS(scale * cos_rot, -scale * sin_rot,
                  scale * sin_rot,  scale * cos_rot, 0, 0);
      if (mirror_ && caffe_rng_rand()&1) {
        RS.a00_ = -RS.a00_;
        RS.a10_ = -RS.a10_;
      }
      Affine2D T1(1, 0, 0, 1, 0.5*W + sx, 0.5*H + sy);
      r.push_back(T1*RS*T0);
    }
  }
  return r;
}
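
Reading the returned composition right to left: T0 recenters the Wo x Ho output patch on the origin, RS applies the sampled rotation and scale (mirroring negates the first column of RS), and T1 places the patch at a randomly jittered position inside the W x H input; the sx/sy ranges are computed so the rotated, scaled patch always stays within bounds.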
        SemiContrastLossLayerTest()
            : blob_bottom_data_(new Blob<Dtype>(300,10,1,1)),
              blob_bottom_y_(new Blob<Dtype>(300,1,1,1)),
              blob_bottom_feat_(new Blob<Dtype>(300,10,1,1)),
              blob_top_loss_(new Blob<Dtype>()) {

              FillerParameter filler_param;
              UniformFiller<Dtype> filler(filler_param);
              //GaussianFiller<Dtype> filler(filler_param);
              filler.Fill(this->blob_bottom_data_);
              blob_bottom_vec_.push_back(blob_bottom_data_);
              const Dtype* data = blob_bottom_data_->cpu_data();
              for (int i = 0; i < blob_bottom_data_->count(); i++) {
                printf("%.3f ", data[i]);
                if ((i + 1) % 10 == 0) printf("\n");
              }
              printf("\n");
              int ncount = 0;
              printf("label:");
              for (int i = 0; i < blob_bottom_y_->count(); i++) {
                  blob_bottom_y_->mutable_cpu_data()[i] =
                      (caffe_rng_rand() % 10) - Dtype(1.0);  // in [-1, 8]; -1 presumably marks unlabeled
                  if (blob_bottom_y_->cpu_data()[i] == Dtype(-1))
                    ncount++;
                  printf("%.0f ", blob_bottom_y_->cpu_data()[i]);
              }
              printf(":num:%d\n", ncount);
              blob_bottom_vec_.push_back(blob_bottom_y_);
              filler.Fill(this->blob_bottom_feat_);
              blob_bottom_vec_.push_back(blob_bottom_feat_);
              blob_top_vec_.push_back(blob_top_loss_);
            }
Example #10
static void bernoulli_generate(int n, double p, int* r) {
  int seed = 17 + caffe_rng_rand() % 4096;

#ifdef _OPENMP
  int nthr = omp_get_max_threads();
  int threshold = nthr * caffe::cpu::OpenMpManager::getProcessorSpeedMHz() / 3;
  bool run_parallel =
    (Caffe::mode() != Caffe::GPU) &&
    (omp_in_parallel() == 0) &&
    (n >= threshold);
  if (!run_parallel) nthr = 1;

# pragma omp parallel num_threads(nthr)
  {
    const int ithr = omp_get_thread_num();
    const int avg_amount = (n + nthr - 1) / nthr;
    const int my_offset = ithr * avg_amount;
    const int my_amount = std::min(my_offset + avg_amount, n) - my_offset;
#else
  {
    const int my_amount = n;
    const int my_offset = 0;
#endif

    VSLStreamStatePtr stream;
    vslNewStream(&stream, VSL_BRNG_MCG31, seed);
    vslSkipAheadStream(stream, my_offset);
    viRngBernoulli(VSL_RNG_METHOD_BERNOULLI_ICDF, stream, my_amount,
      r + my_offset, p);
    vslDeleteStream(&stream);
  }
}
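
A minimal usage sketch for drawing a Bernoulli mask with this helper (the mask length and probability are hypothetical):

#include <vector>

const int n = 1 << 20;  // hypothetical mask length
std::vector<int> mask(n);
bernoulli_generate(n, 0.5, mask.data());  // each entry becomes 1 with probability 0.5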
  SparseLogLayerTest()
      : blob_bottom_(new Blob<Dtype>(10, 1, 5, 5)),
        blob_top_(new Blob<Dtype>(10, 1, 5, 5)) {
    // fill the values
    FillerParameter filler_param;
    filler_param.set_min(0.0);
    filler_param.set_max(1.5);
    UniformFiller<Dtype>  filler(filler_param);
    //GaussianFiller<Dtype> filler(filler_param);
    filler.Fill(this->blob_bottom_);

    // randomly set channel to 0.0 in label data
    int num = this->blob_bottom_->num();
    int height = this->blob_bottom_->height();
    int width = this->blob_bottom_->width();
    int spatial_count = height * width;
    Dtype* bottom_data = this->blob_bottom_->mutable_cpu_data();

    for (int n = 0; n < num; ++n) {
      for (int i = 0; i < spatial_count; ++i) {
        // Zero out roughly half of the entries at random.
        if (!(caffe_rng_rand() % 2))
          *(bottom_data + this->blob_bottom_->offset(n) + i) = Dtype(0.0);
      }
    }

    blob_bottom_vec_.push_back(blob_bottom_);
    blob_top_vec_.push_back(blob_top_);
  }
 InfogainLossLayerTest()
     : blob_bottom_data_(new Blob<Dtype>(4, 2, 5, 2)),
       blob_bottom_label_(new Blob<Dtype>(4, 2, 1, 2)),
       blob_bottom_infogain_(new Blob<Dtype>(1, 1, 5, 5)),
       blob_top_loss_(new Blob<Dtype>()),
       blob_top_prob_(new Blob<Dtype>()),
       inner_(2), outer_(4*2), num_labels_(5) {
   Caffe::set_random_seed(1701);
   FillerParameter filler_param;
   filler_param.set_min(-0.5);
   filler_param.set_max(2.0);
   UniformFiller<Dtype> filler(filler_param);
   filler.Fill(this->blob_bottom_data_);
   blob_bottom_vec_.push_back(blob_bottom_data_);
   for (int i = 0; i < blob_bottom_label_->count(); ++i) {
     blob_bottom_label_->mutable_cpu_data()[i] =
       caffe_rng_rand() % num_labels_;
   }
   blob_bottom_vec_.push_back(blob_bottom_label_);
   filler_param.set_min(0.1);
   filler_param.set_max(2.0);
   UniformFiller<Dtype> infogain_filler(filler_param);
   infogain_filler.Fill(this->blob_bottom_infogain_);
   blob_bottom_vec_.push_back(blob_bottom_infogain_);
   blob_top_vec_.push_back(blob_top_loss_);
   blob_top_vec_.push_back(blob_top_prob_);
 }
  AccuracyLayerTest()
      : blob_bottom_data_(new Blob<Dtype>()),
        blob_bottom_label_(new Blob<Dtype>()),
        blob_top_(new Blob<Dtype>()),
        top_k_(3) {
    vector<int> shape(2);
    shape[0] = 100;
    shape[1] = 10;
    blob_bottom_data_->Reshape(shape);
    shape.resize(1);
    blob_bottom_label_->Reshape(shape);
    // fill the probability values
    FillerParameter filler_param;
    GaussianFiller<Dtype> filler(filler_param);
    filler.Fill(this->blob_bottom_data_);

    const unsigned int prefetch_rng_seed = caffe_rng_rand();
    shared_ptr<Caffe::RNG> rng(new Caffe::RNG(prefetch_rng_seed));
    caffe::rng_t* prefetch_rng =
          static_cast<caffe::rng_t*>(rng->generator());
    Dtype* label_data = blob_bottom_label_->mutable_cpu_data();
    for (int i = 0; i < 100; ++i) {
      label_data[i] = (*prefetch_rng)() % 10;
    }

    blob_bottom_vec_.push_back(blob_bottom_data_);
    blob_bottom_vec_.push_back(blob_bottom_label_);
    blob_top_vec_.push_back(blob_top_);
  }
Example #14
TYPED_TEST(GPUMathFunctionsTest, TestScale) {
  int n = this->blob_bottom_->count();
  TypeParam alpha = this->blob_bottom_->cpu_diff()[caffe_rng_rand() %
                                                   this->blob_bottom_->count()];

  device *dc = Caffe::GetDefaultDevice();
  if (dc->backend() == BACKEND_CUDA) {
#ifdef USE_CUDA
    caffe_gpu_scale<TypeParam>(n, alpha, this->blob_bottom_->gpu_data(),
                             this->blob_bottom_->mutable_gpu_diff());
#endif  // USE_CUDA
  } else {
#ifdef USE_GREENTEA
    greentea_gpu_scale<TypeParam>(dc->id(), n, alpha,
                         (cl_mem)(this->blob_bottom_->gpu_data()), 0,
                         (cl_mem)(this->blob_bottom_->mutable_gpu_diff()), 0);
#endif  // USE_GREENTEA
  }

  const TypeParam* scaled = this->blob_bottom_->cpu_diff();
  const TypeParam* x = this->blob_bottom_->cpu_data();
  for (int i = 0; i < n; ++i) {
    EXPECT_EQ(scaled[i], x[i] * alpha);
  }
}
 OneVersusAllLossLayerTest()
     : blob_bottom_data_(new Blob<Dtype>(10, 1, 5, 5)),
       blob_bottom_targets_(new Blob<Dtype>(10, 1, 5, 5)),
       blob_top_loss_(new Blob<Dtype>()) {
   // Fill the data vector
   FillerParameter data_filler_param;
   data_filler_param.set_min(0.011); // gradient checker adds +/- 0.01
   data_filler_param.set_max(0.989);
   UniformFiller<Dtype> data_filler(data_filler_param);
   data_filler.Fill(blob_bottom_data_);
   blob_bottom_vec_.push_back(blob_bottom_data_);
   // Fill the targets vector
   /*FillerParameter targets_filler_param;
   targets_filler_param.set_min(0);
   targets_filler_param.set_max(1);
   UniformFiller<Dtype> targets_filler(targets_filler_param);
   targets_filler.Fill(blob_bottom_targets_);*/
   int temp;
   for (int i = 0; i < blob_bottom_targets_->count(); ++i) {
     temp = caffe_rng_rand() % 12;
     assert(temp < 12);
     if(temp < 11) {
       blob_bottom_targets_->mutable_cpu_data()[i] =
                                         ((Dtype)temp) / ((Dtype)10);
     } else {
       blob_bottom_targets_->mutable_cpu_data()[i] = 253; // ignore label
     }
   }
   blob_bottom_vec_.push_back(blob_bottom_targets_);
   blob_top_vec_.push_back(blob_top_loss_);
 }
Example #16
RDMAChannel::RDMAChannel(const RDMAAdapter& adapter)
    : adapter_(adapter),
      buffers_(),
      memory_regions_(MAX_BUFFERS),
      region_regions_(MAX_BUFFERS),
      memory_regions_received_() {

  // Create write completion queue
  write_cq_ = ibv_create_cq(adapter_.context_, 1, NULL, NULL, 0);
  CHECK(write_cq_) << "Failed to create completion queue";

  // Create queue pair
  {
    struct ibv_qp_init_attr attr;
    caffe_memset(sizeof(ibv_qp_init_attr), 0, &attr);
    attr.send_cq = write_cq_;
    attr.recv_cq = adapter.cq_;
    attr.cap.max_send_wr = RDMAAdapter::MAX_CONCURRENT_WRITES;
    attr.cap.max_recv_wr = RDMAAdapter::MAX_CONCURRENT_WRITES;
    attr.cap.max_send_sge = 1;
    attr.cap.max_recv_sge = 1;
    attr.qp_type = IBV_QPT_RC;

    qp_ = ibv_create_qp(adapter.pd_, &attr);
    CHECK(qp_) << "Failed to create queue pair";
  }

  // Init queue pair
  {
    struct ibv_qp_attr attr;
    caffe_memset(sizeof(ibv_qp_attr), 0, &attr);
    attr.qp_state = IBV_QPS_INIT;
    attr.pkey_index = 0;
    attr.port_num = 1;
    attr.qp_access_flags = IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_WRITE;

    int mask = IBV_QP_STATE | IBV_QP_PKEY_INDEX | IBV_QP_PORT
        | IBV_QP_ACCESS_FLAGS;
    CHECK(!ibv_modify_qp(qp_, &attr, mask)) << "Failed to set QP to INIT";
  }

  // Local address
  {
    struct ibv_port_attr attr;
    CHECK(!ibv_query_port(adapter.context_, (uint8_t) 1, &attr))
        << "Query port";
    self_.lid = attr.lid;
    self_.qpn = qp_->qp_num;
    self_.psn = caffe_rng_rand() & 0xffffff;  // the PSN is a 24-bit field
  }

  for (int i = 0; i < MAX_BUFFERS; ++i) {
    RecvMR(i);
  }

  // Create initial recv request for data. 
  recv();
  // Create initial recv request for ctrl signals.
  recv();
}
  virtual void SetUp() {
    Caffe::set_random_seed(1601);
    vector<int> shape1, shape2;
    shape1.push_back(NUM);
    shape1.push_back(CHENNAL);
    shape1.push_back(HEIGHT);
    shape1.push_back(WIDTH);
    shape2.push_back(NUM);
    shape2.push_back(1);
    shape2.push_back(1);
    shape2.push_back(1);
    blob_bottom1_->Reshape(shape1);
    blob_bottom2_->Reshape(shape2);

    // fill the values
    FillerParameter filler_param;
    GaussianFiller<Dtype> filler1(filler_param);
    GaussianFiller<Dtype> filler2(filler_param);    
    filler1.Fill(this->blob_bottom1_);
    for (int i = 0; i < NUM; ++i){
      blob_bottom2_->mutable_cpu_data()[i] = caffe_rng_rand() % 2;
    }
    for (int i = 0; i < NUM; ++i){
      for (int j = 0; j < CHENNAL * HEIGHT * WIDTH; ++j){
        int idx = i * CHENNAL * HEIGHT * WIDTH + j;
        blob_bottom1_->mutable_cpu_data()[idx] = i * 10;
      } 
    }

    blob_bottom_vec_.push_back(blob_bottom1_);
    blob_bottom_vec_.push_back(blob_bottom2_);
    blob_top_vec_.push_back(blob_top_);

  }
Example #18
void DataLayer<Dtype>::DataLayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  // Initialize DB
  db_.reset(db::GetDB(this->layer_param_.data_param().backend()));
  db_->Open(this->layer_param_.data_param().source(), db::READ);
  cursor_.reset(db_->NewCursor());

  if (this->layer_param_.data_param().rand_skip() ||
      this->layer_param_.data_param().skip()) {
    unsigned int skip;
    // Check if we should randomly skip a few data points
    if (this->layer_param_.data_param().rand_skip()) {
      skip = caffe_rng_rand() %
                          this->layer_param_.data_param().rand_skip();
    } else {
      skip = this->layer_param_.data_param().skip();
    }
    LOG(INFO) << "Skipping first " << skip << " data points.";
    while (skip-- > 0) {
      cursor_->Next();
    }
  }

  // Read a data point, and use it to initialize the top blob.
  Datum datum;
  datum.ParseFromString(cursor_->value());

  bool force_color = this->layer_param_.data_param().force_encoded_color();
  if ((force_color && DecodeDatum(&datum, true)) ||
      DecodeDatumNative(&datum)) {
    LOG(INFO) << "Decoding Datum";
  }
  // image
  int crop_size = this->layer_param_.transform_param().crop_size();
  if (crop_size > 0) {
    top[0]->Reshape(this->layer_param_.data_param().batch_size(),
        datum.channels(), crop_size, crop_size);
    this->prefetch_data_.Reshape(this->layer_param_.data_param().batch_size(),
        datum.channels(), crop_size, crop_size);
    this->transformed_data_.Reshape(1, datum.channels(), crop_size, crop_size);
  } else {
    top[0]->Reshape(
        this->layer_param_.data_param().batch_size(), datum.channels(),
        datum.height(), datum.width());
    this->prefetch_data_.Reshape(this->layer_param_.data_param().batch_size(),
        datum.channels(), datum.height(), datum.width());
    this->transformed_data_.Reshape(1, datum.channels(),
      datum.height(), datum.width());
  }
  LOG(INFO) << "output data size: " << top[0]->num() << ","
      << top[0]->channels() << "," << top[0]->height() << ","
      << top[0]->width();
  // label
  if (this->output_labels_) {
    vector<int> label_shape(1, this->layer_param_.data_param().batch_size());
    top[1]->Reshape(label_shape);
    this->prefetch_label_.Reshape(label_shape);
  }
}
Example #19
  int MapIndexBottomToTop(int bottom_idx, int scale_w,
                          int scale_h, bool randomize) {
    const int input_width = bottom_idx % blob_bottom_->width();
    const int input_height = bottom_idx / blob_bottom_->width();
    const int top_w = scale_w * blob_bottom_->width();
    int out_w = scale_w*input_width+(randomize ? caffe_rng_rand()%scale_w : 0);
    int out_h = scale_h*input_height+(randomize ? caffe_rng_rand()%scale_h : 0);
    int out_idx = out_w + out_h * top_w;
//     std::cout << "mask i, iw, ih, ow, oh, topw, outidx: "
//               << bottom_idx << " " << input_width << " "
//               << input_height << " "
//               << out_w << " "
//               << out_h << " "
//               << top_w << " "
//               << out_idx << "\n";
    return out_idx;
  }
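
For example, with a 4-pixel-wide bottom blob, scale_w = scale_h = 2, and randomize = false, bottom_idx = 5 maps to input (w, h) = (1, 1), top width 8, and output (2, 2), so out_idx = 2 + 2 * 8 = 18; with randomize = true each output coordinate is further jittered by 0 or 1.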
Example #20
void DepthDataLayer<Dtype>::DataLayerSetUp(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
    const int new_height = this->layer_param_.depth_data_param().new_height();
    const int new_width  = this->layer_param_.depth_data_param().new_width();
    const bool is_color  = this->layer_param_.depth_data_param().is_color();
    string root_folder   = this->layer_param_.depth_data_param().root_folder();

    CHECK((new_height == 0 && new_width == 0) ||
          (new_height > 0 && new_width > 0)) << "Current implementation requires "
                  "new_height and new_width to be set at the same time.";
    // Read the file with image filenames and depth filenames
    const string& source = this->layer_param_.depth_data_param().source();
    LOG(INFO) << "Opening file " << source;
    std::ifstream infile(source.c_str());
    string image_filename;
    string depth_filename;
    while (infile >> image_filename >> depth_filename) {
        lines_.push_back(std::make_pair(image_filename, depth_filename));
    }
    infile.close();

    // randomly shuffle data
    LOG(INFO) << "Shuffleing data";
    const unsigned int prefetch_rng_seed = caffe_rng_rand();
    prefetch_rng_.reset(new Caffe::RNG(prefetch_rng_seed));
    ShuffleImages();
    LOG(INFO) << "A total of " << lines_.size() << " images.";

    lines_id_ = 0;

    //image
    // Read an image, and use it to initialize the top blob.
    cv::Mat cv_img = ReadImageToCVMat(root_folder + lines_[lines_id_].first,
                                      new_height, new_width, is_color);
    CHECK(cv_img.data) << "Could not load " << lines_[lines_id_].first;
    // Use data_transformer to infer the expected blob shape from a cv_image.
    vector<int> top_shape = this->data_transformer_->InferBlobShape(cv_img);
    this->transformed_data_.Reshape(top_shape);
    // Reshape prefetch_data and top[0] according to the batch_size.
    const int batch_size = this->layer_param_.depth_data_param().batch_size();
    CHECK_GT(batch_size, 0) << "Positive batch size required";
    top_shape[0] = batch_size;
    this->prefetch_data_.Reshape(top_shape);
    top[0]->ReshapeLike(this->prefetch_data_);

    LOG(INFO) << "output data size: " << top[0]->num() << ","
              << top[0]->channels() << "," << top[0]->height() << ","
              << top[0]->width();

    //depths
    vector<int> label_shape;
    label_shape.push_back(batch_size);
    label_shape.push_back(74*74);
    top[1]->Reshape(label_shape);
    this->prefetch_label_.Reshape(label_shape);

    LOG(INFO) << "output depth size: " << label_shape[0] << ","
              << label_shape[1];

}
Example #21
void DataTransformer<Dtype>::InitRand() {
  const bool needs_rand = param_.mirror() ||
      (phase_ == TRAIN && param_.crop_size());
  if (needs_rand) {
    const unsigned int rng_seed = caffe_rng_rand();
    rng_.reset(new Caffe::RNG(rng_seed));
  } else {
    rng_.reset();
  }
}
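
For reference, this seeded RNG is consumed through the transformer's Rand(n) helper; in BVLC Caffe that companion looks like the following:

template <typename Dtype>
int DataTransformer<Dtype>::Rand(int n) {
  CHECK(rng_);
  CHECK_GT(n, 0);
  caffe::rng_t* rng =
      static_cast<caffe::rng_t*>(rng_->generator());
  return ((*rng)() % n);
}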
TYPED_TEST(MathFunctionsTest, TestScaleGPU) {
  int n = this->blob_bottom_->count();
  TypeParam alpha = this->blob_bottom_->cpu_diff()[caffe_rng_rand() %
                                                   this->blob_bottom_->count()];
  caffe_gpu_scale<TypeParam>(n, alpha, this->blob_bottom_->gpu_data(),
                             this->blob_bottom_->mutable_gpu_diff());
  const TypeParam* scaled = this->blob_bottom_->cpu_diff();
  const TypeParam* x = this->blob_bottom_->cpu_data();
  for (int i = 0; i < n; ++i) {
    EXPECT_EQ(scaled[i], x[i] * alpha);
  }
}
	void InfogainLossLayerTestForwardPerformance(int num_images, int num_channels, int im_width, int im_height) {

		typedef typename TypeParam::Dtype Dtype;
		LayerParameter layer_param;
		InfogainLossLayer<Dtype> layer(layer_param);

		blob_bottom_data_->Reshape(num_images, num_channels, 1, 1);
		blob_bottom_label_->Reshape(num_images, 1, 1, 1);
		blob_bottom_infogain_->Reshape(1, 1, num_channels, num_channels);

		FillerParameter filler_param;
		UniformFiller<Dtype> filler(filler_param);
		filler.Fill(this->blob_bottom_data_);

		for (int i = 0; i < blob_bottom_label_->count(); ++i) {
			blob_bottom_label_->mutable_cpu_data()[i] = caffe_rng_rand() % 5;
		}

		filler_param.set_min(0.1);
		filler_param.set_max(2.0);
		UniformFiller<Dtype> infogain_filler(filler_param);
		infogain_filler.Fill(this->blob_bottom_infogain_);

		blob_bottom_vec_.clear();
		blob_bottom_vec_.push_back(blob_bottom_data_);
		blob_bottom_vec_.push_back(blob_bottom_label_);
		blob_bottom_vec_.push_back(blob_bottom_infogain_);

		layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);

#if defined(USE_CUDA) || defined(USE_OPENCL)
		blob_bottom_data_->mutable_gpu_data();
		blob_bottom_data_->mutable_gpu_diff();
		blob_bottom_label_->mutable_gpu_data();
		blob_bottom_label_->mutable_gpu_diff();
		blob_bottom_infogain_->mutable_gpu_data();
		blob_bottom_infogain_->mutable_gpu_diff();
		blob_top_loss_->mutable_gpu_data();
		blob_top_loss_->mutable_gpu_diff();
#endif

		record r;
		r.type = std::string(typeid(Dtype).name());
		r.num_images = num_images;
		r.num_channels = num_channels;
		r.img_width = im_width;
		r.img_height = im_height;

		BENCH(r, {
			layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
		});
	}
Example #24
 virtual void SetUp() {
   Caffe::set_random_seed(1601);
   blob_bottom_->Reshape(BATCH_SIZE, SENTENCE_LENGTH, 1, 1);
   // fill the values
   Dtype * bottom_data = this->blob_bottom_->mutable_cpu_data();
   for (int n = 0; n < BATCH_SIZE; ++n) {
     for (int i = 0; i < SENTENCE_LENGTH; ++i) {
       bottom_data[i + n * SENTENCE_LENGTH] = caffe_rng_rand() % VOCAB_SIZE;
     }
   }
   blob_bottom_vec_.push_back(blob_bottom_);
   blob_top_vec_.push_back(blob_top_);
 }
 MultinomialLogisticLossLayerTest()
     : blob_bottom_data_(new Blob<Dtype>(10, 5, 1, 1)),
       blob_bottom_label_(new Blob<Dtype>(10, 1, 1, 1)) {
   Caffe::set_random_seed(1701);
   // fill the values
   FillerParameter filler_param;
   PositiveUnitballFiller<Dtype> filler(filler_param);
   filler.Fill(this->blob_bottom_data_);
   blob_bottom_vec_.push_back(blob_bottom_data_);
   for (int i = 0; i < blob_bottom_label_->count(); ++i) {
     blob_bottom_label_->mutable_cpu_data()[i] = caffe_rng_rand() % 5;
   }
   blob_bottom_vec_.push_back(blob_bottom_label_);
 }
 SoftmaxWithLossLayerTest()
     : blob_bottom_data_(new Blob<Dtype>(10, 5, 1, 1)),
       blob_bottom_label_(new Blob<Dtype>(10, 1, 1, 1)) {
   // fill the values
   FillerParameter filler_param;
   filler_param.set_std(10);
   GaussianFiller<Dtype> filler(filler_param);
   filler.Fill(this->blob_bottom_data_);
   blob_bottom_vec_.push_back(blob_bottom_data_);
   for (int i = 0; i < blob_bottom_label_->count(); ++i) {
     blob_bottom_label_->mutable_cpu_data()[i] = caffe_rng_rand() % 5;
   }
   blob_bottom_vec_.push_back(blob_bottom_label_);
 }
void VolumeDataLayer<Dtype>::CreatePrefetchThread() {
  phase_ = Caffe::phase();
  const bool prefetch_needs_rand = (phase_ == Caffe::TRAIN) &&
      (this->layer_param_.data_param().mirror() ||
       this->layer_param_.data_param().crop_size());
  if (prefetch_needs_rand) {
    const unsigned int prefetch_rng_seed = caffe_rng_rand();
    prefetch_rng_.reset(new Caffe::RNG(prefetch_rng_seed));
  } else {
    prefetch_rng_.reset();
  }
  // Create the thread.
  CHECK(!pthread_create(&thread_, NULL, VolumeDataLayerPrefetch<Dtype>,
        static_cast<void*>(this))) << "Pthread execution failed.";
}
Example #28
  virtual void FillBottoms() {
    // fill the probability values
    FillerParameter filler_param;
    GaussianFiller<Dtype> filler(filler_param);
    filler.Fill(this->blob_bottom_data_);

    const unsigned int prefetch_rng_seed = caffe_rng_rand();
    shared_ptr<Caffe::RNG> rng(new Caffe::RNG(prefetch_rng_seed));
    caffe::rng_t* prefetch_rng =
          static_cast<caffe::rng_t*>(rng->generator());
    Dtype* label_data = blob_bottom_label_->mutable_cpu_data();
    for (int i = 0; i < blob_bottom_label_->count(); ++i) {
      label_data[i] = (*prefetch_rng)() % 10;
    }
  }
 OrdinalRegressionLossLayerTest()
     : blob_bottom_data_(new Blob<Dtype>(100, 200, 1, 1)),
       blob_bottom_label_(new Blob<Dtype>(100, 1, 1, 1)),
       blob_top_loss_(new Blob<Dtype>()) {
   FillerParameter filler_param;
   filler_param.set_std(10);
   GaussianFiller<Dtype> filler(filler_param);
   filler.Fill(this->blob_bottom_data_);
   for (int i = 0; i < blob_bottom_label_->count(); i++) {
     blob_bottom_label_->mutable_cpu_data()[i] = caffe_rng_rand() % 100;
   }
   blob_bottom_vec_.push_back(blob_bottom_data_);
   blob_bottom_vec_.push_back(blob_bottom_label_);
   blob_top_vec_.push_back(blob_top_loss_);
 }
Example #30
void InternalThread::StartInternalThread(device* device_context) {
  CHECK(!is_started()) << "Threads should persist and not be restarted.";

  thread_device_ = device_context;

  Caffe::Brew mode = Caffe::mode();
  int rand_seed = caffe_rng_rand();
  int solver_count = Caffe::solver_count();
  bool root_solver = Caffe::root_solver();

  try {
    thread_.reset(
        new boost::thread(&InternalThread::entry, this, thread_device_,
                          mode, rand_seed, solver_count, root_solver));
  } catch (std::exception& e) {
    LOG(FATAL)<< "Thread exception: " << e.what();
  }
}
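
For context, a sketch of the matching thread body, assuming this fork's entry() mirrors upstream Caffe (the device-selection step is fork-specific and shown only as a comment):

void InternalThread::entry(device* device_context, Caffe::Brew mode,
                           int rand_seed, int solver_count, bool root_solver) {
  // Device selection for device_context would happen here (fork-specific).
  Caffe::set_mode(mode);
  Caffe::set_random_seed(rand_seed);
  Caffe::set_solver_count(solver_count);
  Caffe::set_root_solver(root_solver);
  InternalThreadEntry();  // subclass hook, e.g. the test bodies above
}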