void MyDataLayer<Dtype>::load_batch(Batch<Dtype>* batch){
    CPUTimer batch_timer;
    batch_timer.Start();
    double read_time = 0;
    double trans_time = 0;
    CPUTimer timer;
    CHECK(batch->data_.count());
    CHECK(this->transformed_data_.count());

    MyDataParameter my_data_param = this->layer_param_.my_data_param();
    // Get batch size
    const int batch_size = my_data_param.batch_size();

    // Reshape according to the first image of each batch
    // on single input batches allows for inputs of varying dimension
    cv::Mat cv_img = samples_[lines_id_].first;
    CHECK(cv_img.data) << "Could not load sample " << lines_id_;
    // Use data_transformer to infer the expected blob shape from a cv_img
    vector<int> top_shape = this->data_transformer_->InferBlobShape(cv_img);
    this->transformed_data_.Reshape(top_shape);
    // Reshape batch according to the batch_size
    top_shape[0] = batch_size;
    batch->data_.Reshape(top_shape);
    
    Dtype* prefetch_data = batch->data_.mutable_cpu_data();
    Dtype* prefetch_label = batch->label_.mutable_cpu_data();

    // datum scales
    int samples_size = samples_.size();
    for(int item_id=0;item_id<batch_size;++item_id){
      // get a blob
      timer.Start();
      CHECK_GT(samples_size, lines_id_);
      cv::Mat sample = samples_[lines_id_].first;
      CHECK(sample.data) << "Could not load sample " << lines_id_;
      read_time += timer.MicroSeconds();
      timer.Start();
      // apply transformations to the image
      int offset = batch->data_.offset(item_id);
      this->transformed_data_.set_cpu_data(prefetch_data + offset);
      this->data_transformer_->Transform(sample,&(this->transformed_data_));
      trans_time += timer.MicroSeconds();

      prefetch_label[item_id] = samples_[lines_id_].second;
      // go to the next iter
      lines_id_++;
      if(lines_id_>=samples_size){
        // We have reached the end. Restart from the first.
        DLOG(INFO) << "Restarting data prefetching from start.";
        lines_id_=0;
        if(my_data_param.shuffle()){
          ShuffleImages();
        }
      }
    }
    batch_timer.Stop();
    DLOG(INFO) << "Prefetch batch: " << batch_timer.MilliSeconds() << " ms.";
    DLOG(INFO) << "     Read time: " << read_time / 1000 << " ms.";
    DLOG(INFO) << "Transform time: " << trans_time / 1000 << " ms.";
}
Example #2
void DataLayer<Dtype>::InternalThreadEntry() {
  CPUTimer batch_timer;
  batch_timer.Start();
  double read_time = 0;
  double trans_time = 0;
  CPUTimer timer;
  CHECK(this->prefetch_data_.count());
  CHECK(this->transformed_data_.count());

  // Reshape according to the first datum of each batch
  // on single input batches allows for inputs of varying dimension.
  const int batch_size = this->layer_param_.data_param().batch_size();
  Datum datum;
  datum.ParseFromString(cursor_->value());
  // Use data_transformer to infer the expected blob shape from datum.
  vector<int> top_shape = this->data_transformer_->InferBlobShape(datum);
  this->transformed_data_.Reshape(top_shape);
  // Reshape prefetch_data according to the batch_size.
  top_shape[0] = batch_size;
  this->prefetch_data_.Reshape(top_shape);

  Dtype* top_data = this->prefetch_data_.mutable_cpu_data();
  Dtype* top_label = NULL;  // suppress warnings about uninitialized variables

  if (this->output_labels_) {
    top_label = this->prefetch_label_.mutable_cpu_data();
  }
  timer.Start();
  for (int item_id = 0; item_id < batch_size; ++item_id) {
    // get a datum
    Datum datum;
    datum.ParseFromString(cursor_->value());
    read_time += timer.MicroSeconds();
    timer.Start();
    // Apply data transformations (mirror, scale, crop...)
    int offset = this->prefetch_data_.offset(item_id);
    this->transformed_data_.set_cpu_data(top_data + offset);
    this->data_transformer_->Transform(datum, &(this->transformed_data_));
    // Copy label.
    if (this->output_labels_) {
      top_label[item_id] = datum.label();
    }
    trans_time += timer.MicroSeconds();
    timer.Start();
    // go to the next item.
    cursor_->Next();
    if (!cursor_->valid()) {
      DLOG(INFO) << "Restarting data prefetching from start.";
      cursor_->SeekToFirst();
    }
  }
  timer.Stop();
  batch_timer.Stop();
  DLOG(INFO) << "Prefetch batch: " << batch_timer.MilliSeconds() << " ms.";
  DLOG(INFO) << "     Read time: " << read_time / 1000 << " ms.";
  DLOG(INFO) << "Transform time: " << trans_time / 1000 << " ms.";
}
void FlowDataLayer<Dtype>::load_batch(Batch<Dtype>* batch) {
  CPUTimer batch_timer;
  batch_timer.Start();
  double read_time = 0;
  double trans_time = 0;
  CPUTimer timer;
  CHECK(batch->data_.count());
  CHECK(this->transformed_data_.count());

  // Reshape according to the first datum of each batch
  // on single input batches allows for inputs of varying dimension.
  const int batch_size = this->layer_param_.flow_data_param().batch_size();
  Datum& datum = *(reader_.full().peek());
  // Use data_transformer to infer the expected blob shape from datum.
  vector<int> top_shape = this->data_transformer_->InferBlobShape(datum);
  top_shape[0] = num_test_views_;
  this->transformed_data_.Reshape(top_shape);
  // Reshape batch according to the batch_size.
  top_shape[0] = batch_size * num_test_views_;
  batch->data_.Reshape(top_shape);

  Dtype* top_data = batch->data_.mutable_cpu_data();
  Dtype* top_label = NULL;  // suppress warnings about uninitialized variables

  if (this->output_labels_) {
    top_label = batch->label_.mutable_cpu_data();
  }
  for (int item_id = 0; item_id < batch_size; ++item_id) {
    timer.Start();
    // get a datum
    Datum& datum = *(reader_.full().pop("Waiting for flow data"));
    read_time += timer.MicroSeconds();
//    DLOG(INFO) << "number of data in full queue: " << reader_.full().size();
    timer.Start();
    // Apply data transformations (mirror, scale, crop...)
    int offset = batch->data_.offset(item_id * num_test_views_);
    this->transformed_data_.set_cpu_data(top_data + offset);
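    // Each item spans num_test_views_ slots in the batch blob (note the scaled
    // offset above); the TEST-time transform is handed num_test_views_ so it
    // can fill those slots with different views of the same datum.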
    if (this->phase_ == TRAIN)
        this->data_transformer_->TransformVariedSizeDatum(datum, &(this->transformed_data_));
    else if (this->phase_ == TEST)
        this->data_transformer_->TransformVariedSizeTestDatum(datum, &(this->transformed_data_), num_test_views_);

    // Copy label.
    if (this->output_labels_) {
      top_label[item_id] = datum.label();
    }
    trans_time += timer.MicroSeconds();

    reader_.free().push(const_cast<Datum*>(&datum));
  }
  timer.Stop();
  batch_timer.Stop();
  DLOG(INFO) << "Prefetch batch: " << batch_timer.MilliSeconds() << " ms.";
  DLOG(INFO) << "     Read time: " << read_time / 1000 << " ms.";
  DLOG(INFO) << "Transform time: " << trans_time / 1000 << " ms.";
}
Example #4
void ImageDataLayer<Dtype>::InternalThreadEntry() {
    CPUTimer batch_timer;
    batch_timer.Start();
    double read_time = 0;
    double trans_time = 0;
    CPUTimer timer;
    CHECK(this->prefetch_data_.count());
    CHECK(this->transformed_data_.count());
    Dtype* top_data = this->prefetch_data_.mutable_cpu_data();
    Dtype* top_label = this->prefetch_label_.mutable_cpu_data();
    ImageDataParameter image_data_param = this->layer_param_.image_data_param();
    const int batch_size = image_data_param.batch_size();
    const int new_height = image_data_param.new_height();
    const int new_width = image_data_param.new_width();
    const bool is_color = image_data_param.is_color();
    string root_folder = image_data_param.root_folder();

    // datum scales
    const int lines_size = lines_.size();
    for (int item_id = 0; item_id < batch_size; ++item_id) {
        // get a blob
        timer.Start();
        CHECK_GT(lines_size, lines_id_);
        cv::Mat cv_img = ReadImageToCVMat(root_folder + lines_[lines_id_].first,
                                          new_height, new_width, is_color);
        if (!cv_img.data) {
            continue;
        }
        read_time += timer.MicroSeconds();
        timer.Start();
        // Apply transformations (mirror, crop...) to the image
        int offset = this->prefetch_data_.offset(item_id);
        this->transformed_data_.set_cpu_data(top_data + offset);
        this->data_transformer_.Transform(cv_img, &(this->transformed_data_));
        trans_time += timer.MicroSeconds();

        top_label[item_id] = lines_[lines_id_].second;
        // go to the next iter
        lines_id_++;
        if (lines_id_ >= lines_size) {
            // We have reached the end. Restart from the first.
            DLOG(INFO) << "Restarting data prefetching from start.";
            lines_id_ = 0;
            if (this->layer_param_.image_data_param().shuffle()) {
                ShuffleImages();
            }
        }
    }
    batch_timer.Stop();
    DLOG(INFO) << "Prefetch batch: " << batch_timer.MilliSeconds() << " ms.";
    DLOG(INFO) << "     Read time: " << read_time / 1000 << " ms.";
    DLOG(INFO) << "Transform time: " << trans_time / 1000 << " ms.";
}
Example #5
void SegDataLayer<Dtype>::load_batch(Batch<Dtype>* batch) {

    CPUTimer batch_timer;
    batch_timer.Start();
    double deque_time = 0;
    double trans_time = 0;
    CPUTimer timer;
    CHECK(batch->data_.count());
    CHECK(this->transformed_data_.count());

    // Reshape on single input batches for inputs of varying dimension.
    const int batch_size = this->layer_param_.data_param().batch_size();
    Dtype* top_data = batch->data_.mutable_cpu_data();
    Dtype* top_label = NULL;  // suppress warnings about uninitialized variables

    if (this->output_labels_) {
        top_label = batch->label_.mutable_cpu_data();
    }
    for (int item_id = 0; item_id < batch_size; ++item_id) {
        // get a blob
        timer.Start();
        Datum& datum = *(reader_.full().pop("Waiting for data"));
        deque_time += timer.MicroSeconds();

        // Apply data transformations (mirror, scale, crop...)
        timer.Start();
        const int offset_data = batch->data_.offset(item_id);
        const int offset_label = batch->label_.offset(item_id);
        this->transformed_data_.set_cpu_data(top_data + offset_data);
        this->transformed_label_.set_cpu_data(top_label + offset_label);
        this->data_transformer_->SegTransform(datum,
                                              &(this->transformed_data_),
                                              &(this->transformed_label_));
        trans_time += timer.MicroSeconds();
        reader_.free().push(const_cast<Datum*>(&datum));
    }
    timer.Stop();
    batch_timer.Stop();

#ifdef BENCHMARK_DATA
    LOG(INFO) << "Prefetch batch: " << batch_timer.MilliSeconds() << " ms.";
    LOG(INFO) << "  Dequeue time: " << deque_time / 1000 << " ms.";
    LOG(INFO) << "Transform time: " << trans_time / 1000 << " ms.";
#endif
}
Example #6
void BinaryDataLayer<Dtype>::load_batch(Batch<Dtype>* batch) {
  CPUTimer batch_timer;
  batch_timer.Start();
  double read_time = 0;
  static int time_idx = 0;
  CPUTimer timer;
  CHECK(batch->data_.count());
  ImageDataParameter image_data_param = this->layer_param_.image_data_param();
  string root_folder = image_data_param.root_folder();
  const int batch_size = this->layer_param_.image_data_param().batch_size();

  const vector<int> & top_shape = this->top_shape_;
  // Reshape batch according to the batch_size.
  batch->data_.Reshape(top_shape);

  Dtype* prefetch_data = batch->data_.mutable_cpu_data();
  Dtype* prefetch_label = batch->label_.mutable_cpu_data();

  // datum scales
  const int lines_size = lines_.size();
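  // Elements per item: channels * height * width.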
  const int count = top_shape[1] * top_shape[2] * top_shape[3];
  for (int item_id = 0; item_id < batch_size; ++item_id) {
      // get a blob
      timer.Start();
      CHECK_GT(lines_size, lines_id_);
      int offset = batch->data_.offset(item_id);
      int ret = ReadBinaryBlob(root_folder + lines_[lines_id_].first,
          prefetch_data + offset, count);
      read_time += timer.MicroSeconds();
      CHECK(ret == 0) << "Could not load " << lines_[lines_id_].first;

      prefetch_label[item_id] = lines_[lines_id_].second;
    // go to the next iter
    lines_id_++;
    if (lines_id_ >= lines_size) {
      // We have reached the end. Restart from the first.
      DLOG(INFO) << "Restarting data prefetching from start.";
      lines_id_ = 0;
      if (this->layer_param_.image_data_param().shuffle()) {
        ShuffleImages();
      }
    }
  }
  batch_timer.Stop();
  DLOG(INFO) << "Prefetch batch: " << batch_timer.MilliSeconds() << " ms.";
  DLOG(INFO) << "     Read time: " << read_time / 1000 << " ms.";
}
void DataLstmTrainHistLayer<Dtype>::InternalThreadEntry() {
  CPUTimer batch_timer;
  batch_timer.Start();
  double read_time = 0;
  double trans_time = 0;
  CPUTimer timer;
  CHECK(this->prefetch_data_.count());

  Datum datum;
  Dtype* top_data = this->prefetch_data_.mutable_cpu_data();
  Dtype* top_label = this->prefetch_label_.mutable_cpu_data();
  Dtype* top_hist = this->prefetch_hist_.mutable_cpu_data();
  Dtype* top_marker = this->prefetch_marker_.mutable_cpu_data();

  // datum scales
  const int size = resize_height*resize_width*3;
  const Dtype* mean = this->data_mean_.mutable_cpu_data();

  string value;
  const int kMaxKeyLength = 256;
  char key_cstr[kMaxKeyLength];
  int key;

  const int sequence_size = this->layer_param_.data_lstm_train_hist_param().sequence_size();
  const int ind_seq_num=this->layer_param_.data_lstm_train_hist_param().sequence_num();
  const int interval=this->layer_param_.data_lstm_train_hist_param().interval();
  int item_id;

  for (int time_id = 0; time_id < sequence_size; ++time_id) {
     for (int seq_id = 0; seq_id < ind_seq_num; ++seq_id) {
        item_id=time_id*ind_seq_num+seq_id;
        timer.Start();
        // get a blob

        key=buffer_key[seq_id];  // MUST be changed according to the size of the training set

        snprintf(key_cstr, kMaxKeyLength, "%08d", key);
        db_->Get(leveldb::ReadOptions(), string(key_cstr), &value);
        datum.ParseFromString(value);
        const string& data = datum.data();

        read_time += timer.MicroSeconds();
        timer.Start();

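        // Copy the raw image bytes into the data blob, subtracting the mean
        // image element by element.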
        for (int j = 0; j < size; ++j) {
           Dtype datum_element = static_cast<Dtype>(static_cast<uint8_t>(data[j]));
           top_data[item_id * size + j] = (datum_element - mean[j]);
        }

        for (int j = 0; j < para_dim; ++j) { 
           top_label[item_id * para_dim + j] = datum.float_data(j); 
        }

        top_marker[item_id] = datum.float_data(para_dim);

        if (buffer_marker[seq_id] == 0) {
            top_marker[item_id] = 0;   
            buffer_marker[seq_id] = 1;
        }

        //////////////////////////////////// for hist
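        // Fill the history channels: a cleared marker zeroes the history;
        // otherwise each of the para_dim entries is an affine re-normalization
        // (dataset-specific scale/offset constants below) of either the stored
        // hist_blob (first time step) or the previous time step's labels.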
        if (top_marker[item_id] < 0.5) {
           for (int j = 0; j < para_dim; ++j)
               top_hist[item_id * para_dim + j] = 0; 
        } else {
           if (time_id == 0) {
              top_hist[item_id * para_dim + 0] = hist_blob[seq_id * para_dim + 0]/1.1+0.5;
              top_hist[item_id * para_dim + 1] = hist_blob[seq_id * para_dim + 1]*0.17778+1.34445;
              top_hist[item_id * para_dim + 2] = hist_blob[seq_id * para_dim + 2]*0.14545+0.39091;
              top_hist[item_id * para_dim + 3] = hist_blob[seq_id * para_dim + 3]*0.17778-0.34445;
              top_hist[item_id * para_dim + 4] = hist_blob[seq_id * para_dim + 4]/95.0+0.12;
              top_hist[item_id * para_dim + 5] = hist_blob[seq_id * para_dim + 5]/95.0+0.12;
              top_hist[item_id * para_dim + 6] = hist_blob[seq_id * para_dim + 6]*0.14545+1.48181;
              top_hist[item_id * para_dim + 7] = hist_blob[seq_id * para_dim + 7]*0.16+0.98;
              top_hist[item_id * para_dim + 8] = hist_blob[seq_id * para_dim + 8]*0.16+0.02;
              top_hist[item_id * para_dim + 9] = hist_blob[seq_id * para_dim + 9]*0.14545-0.48181;
              top_hist[item_id * para_dim + 10] = hist_blob[seq_id * para_dim + 10]/95.0+0.12;
              top_hist[item_id * para_dim + 11] = hist_blob[seq_id * para_dim + 11]/95.0+0.12;
              top_hist[item_id * para_dim + 12] = hist_blob[seq_id * para_dim + 12]/95.0+0.12;
              top_hist[item_id * para_dim + 13] = hist_blob[seq_id * para_dim + 13]*0.6+0.2;
           } else {
              int pre_id=(time_id-1)*ind_seq_num+seq_id;
              top_hist[item_id * para_dim + 0] = top_label[pre_id * para_dim + 0]/1.1+0.5;
              top_hist[item_id * para_dim + 1] = top_label[pre_id * para_dim + 1]*0.17778+1.34445;
              top_hist[item_id * para_dim + 2] = top_label[pre_id * para_dim + 2]*0.14545+0.39091;
              top_hist[item_id * para_dim + 3] = top_label[pre_id * para_dim + 3]*0.17778-0.34445;
              top_hist[item_id * para_dim + 4] = top_label[pre_id * para_dim + 4]/95.0+0.12;
              top_hist[item_id * para_dim + 5] = top_label[pre_id * para_dim + 5]/95.0+0.12;
              top_hist[item_id * para_dim + 6] = top_label[pre_id * para_dim + 6]*0.14545+1.48181;
              top_hist[item_id * para_dim + 7] = top_label[pre_id * para_dim + 7]*0.16+0.98;
              top_hist[item_id * para_dim + 8] = top_label[pre_id * para_dim + 8]*0.16+0.02;
              top_hist[item_id * para_dim + 9] = top_label[pre_id * para_dim + 9]*0.14545-0.48181;
              top_hist[item_id * para_dim + 10] = top_label[pre_id * para_dim + 10]/95.0+0.12;
              top_hist[item_id * para_dim + 11] = top_label[pre_id * para_dim + 11]/95.0+0.12;
              top_hist[item_id * para_dim + 12] = top_label[pre_id * para_dim + 12]/95.0+0.12;
              top_hist[item_id * para_dim + 13] = top_label[pre_id * para_dim + 13]*0.6+0.2;
           }
        }
        //////////////////////////////////// for hist

        trans_time += timer.MicroSeconds();

        buffer_key[seq_id]++;
        buffer_total[seq_id]++;
        if (buffer_key[seq_id]>total_frames || buffer_total[seq_id]>interval) {
           buffer_key[seq_id]=random(total_frames)+1;
           buffer_marker[seq_id]=0;
           buffer_total[seq_id]=0;
        }

        //////////////////////////////////// for hist
        if (time_id==sequence_size-1) {
           for (int j = 0; j < para_dim; ++j) 
               hist_blob[seq_id * para_dim + j] = datum.float_data(j); 
        }
        //////////////////////////////////// for hist

/*
        if (seq_id == 0) {
           for (int h = 0; h < resize_height; ++h) {
              for (int w = 0; w < resize_width; ++w) {
                 leveldbTrain->imageData[(h*resize_width+w)*3+0]=(uint8_t)data[h*resize_width+w];
                 leveldbTrain->imageData[(h*resize_width+w)*3+1]=(uint8_t)data[resize_height*resize_width+h*resize_width+w];
                 leveldbTrain->imageData[(h*resize_width+w)*3+2]=(uint8_t)data[resize_height*resize_width*2+h*resize_width+w];

                 //leveldbTrain->imageData[(h*resize_width+w)*3+0]=(uint8_t)top_data[item_id * size+h*resize_width+w];
                 //leveldbTrain->imageData[(h*resize_width+w)*3+1]=(uint8_t)top_data[item_id * size+resize_height*resize_width+h*resize_width+w];
                 //leveldbTrain->imageData[(h*resize_width+w)*3+2]=(uint8_t)top_data[item_id * size+resize_height*resize_width*2+h*resize_width+w];
               }
           }
           cvShowImage("Image from leveldb", leveldbTrain);
           cvWaitKey( 1 );
        }
*/
     }
  }

  batch_timer.Stop();
  DLOG(INFO) << "Prefetch batch: " << batch_timer.MilliSeconds() << " ms.";
  DLOG(INFO) << "     Read time: " << read_time / 1000 << " ms.";
  DLOG(INFO) << "Transform time: " << trans_time / 1000 << " ms.";
}
  void MultiImageDataLayer<Dtype>::load_batch(Batch<Dtype>* batch) {
    CPUTimer batch_timer;
    batch_timer.Start();
    double read_time = 0;
    double trans_time = 0;
    CPUTimer timer;
    CHECK(batch->data_.count());
    CHECK(this->transformed_data_.count());
    MultiImageDataParameter multi_image_data_param = this->layer_param_.multi_image_data_param();
    const int batch_size = multi_image_data_param.batch_size();
    const int new_height = multi_image_data_param.new_height();
    const int new_width = multi_image_data_param.new_width();
    const bool is_color = multi_image_data_param.is_color();
    string root_folder = multi_image_data_param.root_folder();
    const int num_images = this->layer_param_.multi_image_data_param().num_images();
    
    // Reshape according to the first image of each batch
    // on single input batches allows for inputs of varying dimension.
    cv::Mat cv_img = ReadImageToCVMat(root_folder + *lines_[lines_id_].first.begin(),
                                      new_height, new_width, is_color);
    CHECK(cv_img.data) << "Could not load " << *lines_[lines_id_].first.begin();
    // Use data_transformer to infer the expected blob shape from a cv_img.
    vector<int> top_shape = this->data_transformer_->InferBlobShape(cv_img);
    this->transformed_data_.Reshape(top_shape);
    top_shape[1] *= num_images;
    // Reshape batch according to the batch_size.
    top_shape[0] = batch_size;
    batch->data_.Reshape(top_shape);

    Dtype* prefetch_data = batch->data_.mutable_cpu_data();
    Dtype* prefetch_label = batch->label_.mutable_cpu_data();

    // datum scales
    const int lines_size = lines_.size();
    for (int item_id = 0; item_id < batch_size; ++item_id) {
      // get a blob
      timer.Start();
      CHECK_GT(lines_size, lines_id_);
      
      if (this->layer_param_.multi_image_data_param().shuffle_images()) {
        caffe::rng_t* prefetch_rng =
            static_cast<caffe::rng_t*>(prefetch_rng_->generator());
        shuffle(lines_[lines_id_].first.begin(), lines_[lines_id_].first.end(),
                prefetch_rng);
      }
      read_time += timer.MicroSeconds();
      timer.Start();
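      // Each of the num_images is transformed into its own group of channels
      // within this item (the offset advances by image_index * channels).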
      for (int image_index = 0; image_index < num_images; image_index++) {
        cv::Mat cv_img = ReadImageToCVMat(root_folder + lines_[lines_id_].first[image_index],
                                          new_height, new_width, is_color);
        CHECK(cv_img.data) << "Could not load " << lines_[lines_id_].first[image_index];
        // Apply transformations (mirror, crop...) to the image
        int offset = batch->data_.offset(item_id, image_index * cv_img.channels());
        this->transformed_data_.set_cpu_data(prefetch_data + offset);
        this->data_transformer_->Transform(cv_img, &(this->transformed_data_));
      }

      trans_time += timer.MicroSeconds();

      prefetch_label[item_id] = lines_[lines_id_].second;
      // go to the next iter
      lines_id_++;
      if (lines_id_ >= lines_size) {
        // We have reached the end. Restart from the first.
        DLOG(INFO) << "Restarting data prefetching from start.";
        lines_id_ = 0;
        if (this->layer_param_.multi_image_data_param().shuffle()) {
          ShuffleImages();
        }
      }
    }
    batch_timer.Stop();
    DLOG(INFO) << "Prefetch batch: " << batch_timer.MilliSeconds() << " ms.";
    DLOG(INFO) << "     Read time: " << read_time / 1000 << " ms.";
    DLOG(INFO) << "Transform time: " << trans_time / 1000 << " ms.";
  }
  void MILDataLayer<Dtype>::load_batch(Batch<Dtype>* batch) {
    CPUTimer timer;
    timer.Start();
    CHECK(batch->data_.count());

    //Dtype* top_data = this->prefetch_data_.mutable_cpu_data();
    //Dtype* top_label = this->prefetch_label_.mutable_cpu_data();
    Dtype* top_data = batch->data_.mutable_cpu_data();
    Dtype* top_label = batch->label_.mutable_cpu_data();
    const int img_size = this->transform_param_.crop_size();
    const int channels = this->layer_param_.mil_data_param().channels();
    const int scale = this->transform_param_.scale();
    const bool mirror = this->transform_param_.mirror();

    const int images_per_batch = this->layer_param_.mil_data_param().images_per_batch();
    const int n_classes = this->layer_param_.mil_data_param().n_classes();
    const int num_scales = this->layer_param_.mil_data_param().num_scales();
    const float scale_factor = this->layer_param_.mil_data_param().scale_factor();

    // zero out batch
    //caffe_set(this->prefetch_data_.count(), Dtype(0), top_data);
    caffe_set(batch->data_.count(), Dtype(0), top_data);
    int item_id;
    for(int i_image = 0; i_image < images_per_batch; i_image++){
      // Sample which image to read
      unsigned int index = counter_; counter_ = counter_ + 1;
      const unsigned int rand_index = this->PrefetchRand();
      if(this->layer_param_.mil_data_param().randomize())
        index = rand_index;

      // LOG(INFO) << index % this->num_images_ << ", " << this->num_images_;
      pair<string, string> p = this->image_database_[index % this->num_images_];
      string im_name = p.first;
      string full_im_name = p.second;
    
      cv::Mat cv_img = cv::imread(full_im_name, CV_LOAD_IMAGE_COLOR);
      if (!cv_img.data) {
        LOG(ERROR) << "Could not open or find file " << full_im_name;
        return;
      }
    
      //REVIEW ktran: do not hardcode dataset name (or its prefix "/labels-")
      //REVIEW ktran: also do not use deep dataset name so that we don't have to modify the core caffe code
      //(ref: https://github.com/BVLC/caffe/commit/a0787631a27ca6478f70341462aafdcf35dabb19)
      hdf5_load_nd_dataset(this->label_file_id_, string("/labels-"+im_name).c_str(), 4, 4, &this->label_blob_);
      const Dtype* label = label_blob_.mutable_cpu_data();
    
      CHECK_EQ(label_blob_.width(), 1)          << "Expected width of label to be 1." ;
      CHECK_EQ(label_blob_.height(), n_classes) << "Expected height of label to be " << n_classes;
      CHECK_EQ(label_blob_.channels(), 1)       << "Expected channels of label to be 1." ;
      CHECK_EQ(label_blob_.num(), 1)            << "Expected num of label to be 1." ;

      float img_size_i = img_size;
      for(int i_scales = 0; i_scales < num_scales; i_scales++){
        // Resize such that the image is of size img_size, img_size
        item_id = i_image*num_scales + i_scales;
        // LOG(INFO) << "MIL Data Layer: scale: " << (int) round(img_size_i);
        cv::Mat cv_cropped_img = Transform_IDL(cv_img, static_cast<int>(round(img_size_i)), mirror);
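        // Copy the resized crop into the blob in CHW order, subtracting the
        // per-channel mean and applying the scale factor.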
        for (int c = 0; c < channels; ++c) {
          for (int h = 0; h < cv_cropped_img.rows; ++h) {
            for (int w = 0; w < cv_cropped_img.cols; ++w) {
              Dtype pixel =
                  static_cast<Dtype>(cv_cropped_img.at<cv::Vec3b>(h, w)[c]);
              top_data[((item_id * channels + c) * img_size + h)
                       * img_size + w]
                  = (pixel - static_cast<Dtype>(mean_value_[c]))*scale;
            }
          }
        }
        img_size_i = std::max(static_cast<float>(1.), img_size_i*scale_factor);
      }
      
      for(int i_label = 0; i_label < n_classes; i_label++){
        top_label[i_image*n_classes + i_label] = label[i_label];
      }
    }

    timer.Stop();
    DLOG(INFO) << "Prefetch batch: " << timer.MilliSeconds() << " ms.";
  }
void DataLayer<Dtype>::InternalThreadEntry() {
  CPUTimer batch_timer;
  batch_timer.Start();
  double read_time = 0;
  double trans_time = 0;
  CPUTimer timer;
  CHECK(this->prefetch_data_.count());
  CHECK(this->transformed_data_.count());

  // Reshape on single input batches for inputs of varying dimension.
  const int batch_size = this->layer_param_.data_param().batch_size();
  const int crop_size = this->layer_param_.transform_param().crop_size();
  if (batch_size == 1 && crop_size == 0) {
    Datum datum;
    datum.ParseFromString(cursor_->value());
    this->prefetch_data_.Reshape(1, datum.channels(),
        datum.height(), datum.width());
    this->transformed_data_.Reshape(1, datum.channels(),
        datum.height(), datum.width());
  }

  Dtype* top_data = this->prefetch_data_.mutable_cpu_data();
  Dtype* top_label = NULL;  // suppress warnings about uninitialized variables

  if (this->output_labels_) {
    top_label = this->prefetch_label_.mutable_cpu_data();
  }
  bool force_color = this->layer_param_.data_param().force_encoded_color();
  for (int item_id = 0; item_id < batch_size; ++item_id) {
    timer.Start();
    // get a blob
    Datum datum;
    datum.ParseFromString(cursor_->value());

    cv::Mat cv_img;
    if (datum.encoded()) {
      if (force_color) {
        cv_img = DecodeDatumToCVMat(datum, true);
      } else {
        cv_img = DecodeDatumToCVMatNative(datum);
      }
      if (cv_img.channels() != this->transformed_data_.channels()) {
        LOG(WARNING) << "Your dataset contains encoded images with mixed "
        << "channel sizes. Consider adding a 'force_color' flag to the "
        << "model definition, or rebuild your dataset using "
        << "convert_imageset.";
      }
    }
    read_time += timer.MicroSeconds();
    timer.Start();

    // Apply data transformations (mirror, scale, crop...)
    int offset = this->prefetch_data_.offset(item_id);
    this->transformed_data_.set_cpu_data(top_data + offset);
    if (datum.encoded()) {
      this->data_transformer_->Transform(cv_img, &(this->transformed_data_));
    } else {
      this->data_transformer_->Transform(datum, &(this->transformed_data_));
    }
    if (this->output_labels_) {
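      // This datum variant carries a repeated label field; copy every label
      // for the current item.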
      for (int label_i = 0; label_i < datum.label_size(); ++label_i){
        top_label[item_id * datum.label_size() + label_i] = datum.label(label_i);
      }
      //top_label[item_id] = datum.label();
    }
    trans_time += timer.MicroSeconds();
    // go to the next iter
    cursor_->Next();
    if (!cursor_->valid()) {
      DLOG(INFO) << "Restarting data prefetching from start.";
      cursor_->SeekToFirst();
    }
  }
  batch_timer.Stop();
  DLOG(INFO) << "Prefetch batch: " << batch_timer.MilliSeconds() << " ms.";
  DLOG(INFO) << "     Read time: " << read_time / 1000 << " ms.";
  DLOG(INFO) << "Transform time: " << trans_time / 1000 << " ms.";
}
Example #11
void DepthDataLayer<Dtype>::InternalThreadEntry() {
    CPUTimer batch_timer;
    batch_timer.Start();
    double read_time = 0;
    double trans_time = 0;
    CPUTimer timer;
    CHECK(this->prefetch_data_.count());
    CHECK(this->transformed_data_.count());
    //CHECK(this->transformed_data_.count());
    DepthDataParameter depth_data_param = this->layer_param_.depth_data_param();
    const int batch_size = depth_data_param.batch_size();
    const int new_height = depth_data_param.new_height();
    const int new_width = depth_data_param.new_width();
    const bool is_color = depth_data_param.is_color();
    string root_folder = depth_data_param.root_folder();

    // Reshape according to the first image of each batch
    // on single input batches allows for inputs of varying dimension.
    cv::Mat cv_img = ReadImageToCVMat(root_folder + lines_[lines_id_].first,
                                      new_height, new_width, is_color);
    CHECK(cv_img.data) << "Could not load " << lines_[lines_id_].first;
    // Use data_transformer to infer the expected blob shape from a cv_img.
    vector<int> top_shape = this->data_transformer_->InferBlobShape(cv_img);
    this->transformed_data_.Reshape(top_shape);
    // Reshape prefetch_data according to the batch_size.
    top_shape[0] = batch_size;
    this->prefetch_data_.Reshape(top_shape);

    Dtype* prefetch_data = this->prefetch_data_.mutable_cpu_data();
    Dtype* prefetch_label = this->prefetch_label_.mutable_cpu_data();

    // datum scales
    const int lines_size = lines_.size();
    for (int item_id = 0; item_id < batch_size; ++item_id) {
        // get a blob
        timer.Start();
        CHECK_GT(lines_size, lines_id_);
        cv::Mat cv_img = ReadImageToCVMat(root_folder + lines_[lines_id_].first,
                                          new_height, new_width, is_color);
        CHECK(cv_img.data) << "Could not load " << lines_[lines_id_].first;
        read_time += timer.MicroSeconds();
        timer.Start();

        int offset = this->prefetch_data_.offset(item_id);
        this->transformed_data_.set_cpu_data(prefetch_data + offset);
        this->data_transformer_->Transform(cv_img, &(this->transformed_data_));
        trans_time += timer.MicroSeconds();

        //read Depths
        //prefetch_label[item_id] = lines_[lines_id_].second;
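        // The label blob is assumed to hold a 74x74 depth map per item;
        // ReadDepthToArray fills a temporary buffer that is copied in below.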
        float depths[74*74];
        ReadDepthToArray(lines_[lines_id_].second, depths);
        int depth_offset = this->prefetch_label_.offset(item_id);
        memcpy(&prefetch_label[depth_offset], &depths[0], sizeof(depths));

        // go to the next iter
        lines_id_++;
        if (lines_id_ >= lines_size) {
            // We have reached the end. Restart from the first.
            DLOG(INFO) << "Restarting data prefetching from start.";
            lines_id_ = 0;
            ShuffleImages();
        }
    }
    batch_timer.Stop();
    DLOG(INFO) << "Prefetch batch: " << batch_timer.MilliSeconds() << " ms.";
    DLOG(INFO) << "     Read time: " << read_time / 1000 << " ms.";
    DLOG(INFO) << "Transform time: " << trans_time / 1000 << " ms.";
}
Example #12
void ImageDataLayer<Dtype>::load_batch(Batch<Dtype>* batch) {  
  CPUTimer batch_timer;  
  batch_timer.Start();  
  double read_time = 0;  
  double trans_time = 0;  
  CPUTimer timer;  
  CHECK(batch->data_.count());  
  CHECK(this->transformed_data_.count());  
  // Get the layer parameters; see the parameter definitions for details
  ImageDataParameter image_data_param = this->layer_param_.image_data_param();  
  const int batch_size = image_data_param.batch_size();  
  const int new_height = image_data_param.new_height();  
  const int new_width = image_data_param.new_width();  
  const bool is_color = image_data_param.is_color();  
  string root_folder = image_data_param.root_folder();  
  
  // Reshape according to the first image of each batch  
  // on single input batches allows for inputs of varying dimension.  
  // Read the first image (after any skipped entries) and use it to set up the shapes
  cv::Mat cv_img = ReadImageToCVMat(root_folder + lines_[lines_id_].first,  
      new_height, new_width, is_color);  
  CHECK(cv_img.data) << "Could not load " << lines_[lines_id_].first;  
  // Use data_transformer to infer the expected blob shape from a cv_img.  
  // Infer the image shape
  vector<int> top_shape = this->data_transformer_->InferBlobShape(cv_img);
  // Set the shape of transformed_data_
  this->transformed_data_.Reshape(top_shape);
  // Reshape batch according to the batch_size.
  // Set the batch size
  top_shape[0] = batch_size;  
  batch->data_.Reshape(top_shape);  
  
  Dtype* prefetch_data = batch->data_.mutable_cpu_data();  
  Dtype* prefetch_label = batch->label_.mutable_cpu_data();  
  
  // datum scales  
  // Read a batch of images and preprocess them
  const int lines_size = lines_.size();  
  for (int item_id = 0; item_id < batch_size; ++item_id) {  
    // get a blob  
    timer.Start();  
    CHECK_GT(lines_size, lines_id_);  
    cv::Mat cv_img = ReadImageToCVMat(root_folder + lines_[lines_id_].first,  
        new_height, new_width, is_color);  
    CHECK(cv_img.data) << "Could not load " << lines_[lines_id_].first;  
    read_time += timer.MicroSeconds();  
    timer.Start();  
    // Apply transformations (mirror, crop...) to the image  
    // Preprocess the image

    // Compute this item's offset within the batch blob
    int offset = batch->data_.offset(item_id);
    // Point transformed_data_ at this item's slot in the prefetch buffer
    this->transformed_data_.set_cpu_data(prefetch_data + offset);
    // Apply the preprocessing
    this->data_transformer_->Transform(cv_img, &(this->transformed_data_));
    trans_time += timer.MicroSeconds();  // accumulate preprocessing time

    // Copy the label to prefetch_label
    prefetch_label[item_id] = lines_[lines_id_].second;
    // go to the next iter  
    lines_id_++;  
    // Is this the last image in the list?
    if (lines_id_ >= lines_size) {  
      // We have reached the end. Restart from the first.  
      DLOG(INFO) << "Restarting data prefetching from start.";  
      lines_id_ = 0;  
      // Shuffle the order of the image indices
      if (this->layer_param_.image_data_param().shuffle()) {  
        ShuffleImages();  
      }  
    }  
  }  
  batch_timer.Stop();  
  DLOG(INFO) << "Prefetch batch: " << batch_timer.MilliSeconds() << " ms.";  
  DLOG(INFO) << "     Read time: " << read_time / 1000 << " ms.";  
  // preprocessing time
  DLOG(INFO) << "Transform time: " << trans_time / 1000 << " ms.";  
}  
void MultiLabelImageDataLayer<Dtype>::load_batch(Batch<Dtype>* batch) {
  CPUTimer batch_timer;
  batch_timer.Start();
  double read_time = 0;
  double trans_time = 0;
  CPUTimer timer;
  CHECK(batch->data_.count());
  CHECK(this->transformed_data_.count());
  ImageDataParameter image_data_param = this->layer_param_.image_data_param();
  const int batch_size = image_data_param.batch_size();
  const int new_height = image_data_param.new_height();
  const int new_width = image_data_param.new_width();
  const bool is_color = image_data_param.is_color();
  string root_folder = image_data_param.root_folder();

  // Reshape according to the first image of each batch
  // on single input batches allows for inputs of varying dimension.
  cv::Mat cv_img = ReadImageToCVMat(root_folder + lines_[lines_id_].first,
      new_height, new_width, is_color);
  CHECK(cv_img.data) << "Could not load " << lines_[lines_id_].first;
  // Use data_transformer to infer the expected blob shape from a cv_img.
  vector<int> top_shape = this->data_transformer_->InferBlobShape(cv_img);
  this->transformed_data_.Reshape(top_shape);
  // Reshape batch according to the batch_size.
  top_shape[0] = batch_size;
  batch->data_.Reshape(top_shape);

  Dtype* prefetch_data = batch->data_.mutable_cpu_data();
  Dtype* prefetch_label = batch->label_.mutable_cpu_data();
  Dtype* prefetch_mask = batch->mask_.mutable_cpu_data();
  
  // datum scales
  const int lines_size = lines_.size();
  for (int item_id = 0; item_id < batch_size; ++item_id) {
    // get a blob
    timer.Start();
    CHECK_GT(lines_size, lines_id_);
    cv::Mat cv_img = ReadImageToCVMat(root_folder + lines_[lines_id_].first,
        new_height, new_width, is_color);
    CHECK(cv_img.data) << "Could not load " << lines_[lines_id_].first;
    read_time += timer.MicroSeconds();
    timer.Start();
    // Apply transformations (mirror, crop...) to the image
    int offset = batch->data_.offset(item_id);
    this->transformed_data_.set_cpu_data(prefetch_data + offset);
    this->data_transformer_->Transform(cv_img, &(this->transformed_data_));
    trans_time += timer.MicroSeconds();

    for(int label_ind = 0; label_ind < label_num_; ++ label_ind){
      int offset = batch->label_.offset(item_id,label_ind);
      prefetch_label[offset] = lines_[lines_id_].second[label_ind] / scale_;

      // prefetch_mask[offset] = static_cast<int>(prefetch_label[offset]) != 0 ? prefetch_label[offset] / 100.0 : Dtype(1.) ; 
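      // With output masks enabled, non-zero labels always contribute; zero
      // labels are kept with roughly 10% probability.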
      if (need_output_mask_){
        if (static_cast<int>(prefetch_label[offset]) != 0){
            prefetch_mask[offset] = 1.;
        }else if (caffe_rng_rand() % 10 < 1){
            prefetch_mask[offset] = 1.;
        }else{
            prefetch_mask[offset] = 0.;
        }
      }
      // LOG(INFO) << " aimed labels " << prefetch_label[offset]<< " AIMed masks: " << prefetch_mask[offset];
    }

    // go to the next iter
    lines_id_++;
    if (lines_id_ >= lines_size) {
      // We have reached the end. Restart from the first.
      DLOG(INFO) << "Restarting data prefetching from start.";
      lines_id_ = 0;
      if (this->layer_param_.image_data_param().shuffle()) {
        ShuffleImages();
      }
    }
  }
  batch_timer.Stop();
  DLOG(INFO) << "Prefetch batch: " << batch_timer.MilliSeconds() << " ms.";
  DLOG(INFO) << "     Read time: " << read_time / 1000 << " ms.";
  DLOG(INFO) << "Transform time: " << trans_time / 1000 << " ms.";
}
Example #14
void FlowDataLayer<Dtype>::InternalThreadEntry() {
  CPUTimer batch_timer;
  batch_timer.Start();
  double read_time = 0;
  double decompress_time = 0;
  double trans_time = 0;
  CPUTimer timer;
  CHECK(this->prefetch_data_.count());
  CHECK(this->transformed_data_.count());
  FlowDataParameter flow_data_param = this->layer_param_.flow_data_param();
  const int batch_size = flow_data_param.batch_size();
  const int stack_size = flow_data_param.stack_size();
  const int height = flow_field_->height();
  const int width = flow_field_->width();
  const int data_dim = height * width * 2;

  Dtype* prefetch_data = this->prefetch_data_.mutable_cpu_data();
  Dtype* prefetch_label = this->prefetch_label_.mutable_cpu_data();
  Dtype* flow_stack_data = flow_stack_->mutable_cpu_data();

  // datum scales
  const int lines_size = lines_.size();
  for (int item_id = 0; item_id < batch_size; ++item_id) {
    CHECK_GT(lines_size, lines_id_ + stack_size - 1);
    // Takes a step of random size.
    if (flow_data_param.rand_step()) {
      unsigned int skip = caffe_rng_rand() % flow_data_param.rand_step();
      lines_id_ += (skip * stack_size);
      lines_id_ = lines_id_ % lines_size;
    }
    prefetch_label[item_id] = lines_[lines_id_].second;
    
    for (int flow_id = 0; flow_id < stack_size; ++flow_id) {
      // reads a compressed flow field.
      timer.Start();
      cv::Mat cv_img = ReadImageToCVMat(lines_[lines_id_].first,
          height, width, true);
      CHECK(cv_img.data) << "Could not load " << lines_[lines_id_].first;
      read_time += timer.MicroSeconds();

      // Decompress the flow.
      timer.Start();
      flow_field_->set_cpu_data(flow_stack_data + data_dim * flow_id);
      Decompress(cv_img, flow_field_.get());
      decompress_time += timer.MicroSeconds();
      lines_id_++;
    }
    // Apply transformations (mirror, crop...) to the flow stack.
    int offset = this->prefetch_data_.offset(item_id);
    this->transformed_data_.set_cpu_data(prefetch_data + offset);
    this->data_transformer_->Transform(flow_stack_.get(), 
        &(this->transformed_data_));
    trans_time += timer.MicroSeconds();

    // go to the next iter
    if (lines_id_ >= lines_size) {
      // We have reached the end. Restart from the first.
      DLOG(INFO) << "Restarting data prefetching from start.";
      lines_id_ = 0;
    }
  }
  batch_timer.Stop();
  DLOG(INFO) << "Prefetch batch: " << batch_timer.MilliSeconds() << " ms.";
  DLOG(INFO) << "     Read time: " << read_time / 1000 << " ms.";
  DLOG(INFO) << "     Decompress time: " << decompress_time / 1000 << " ms.";
  DLOG(INFO) << "Transform time: " << trans_time / 1000 << " ms.";
}
Example #15
void ImageLabelmapDataLayer<Dtype>::load_batch(LabelmapBatch<Dtype>* batch) {
  CPUTimer batch_timer;
  batch_timer.Start();
  double read_time = 0;
  double trans_time = 0;
  CPUTimer timer;
  CHECK(batch->data_.count());
  CHECK(batch->labelmap_.count());
  CHECK(this->transformed_data_.count());
  CHECK(this->transformed_labelmap_.count());
  ImageDataParameter image_data_param = this->layer_param_.image_data_param();
  const int batch_size = image_data_param.batch_size();
  const int new_height = image_data_param.new_height();
  const int new_width = image_data_param.new_width();
  const bool is_color = image_data_param.is_color();
  string root_folder = image_data_param.root_folder();

  // Reshape according to the first image of each batch
  // on single input batches allows for inputs of varying dimension.
  cv::Mat cv_img = ReadImageToCVMat(root_folder + lines_[lines_id_].first,
      new_height, new_width, is_color);
  cv::Mat cv_gt = ReadImageToCVMat(root_folder + lines_[lines_id_].second,
      new_height, new_width, 0);
  CHECK(cv_img.data) << "Could not load " << lines_[lines_id_].first;
  // Use data_transformer to infer the expected blob shape from a cv_img.
  vector<int> top_shape = this->data_transformer_->InferBlobShape(cv_img);
  vector<int> top_shape_labelmap = this->data_transformer_->InferBlobShape(cv_gt);
  
  this->transformed_data_.Reshape(top_shape);
  this->transformed_labelmap_.Reshape(top_shape_labelmap);
  // Reshape prefetch_data and top[0] according to the batch_size.
  top_shape[0] = batch_size;
  top_shape_labelmap[0] = batch_size;
  
  batch->data_.Reshape(top_shape);
  batch->labelmap_.Reshape(top_shape_labelmap);

  Dtype* prefetch_data = batch->data_.mutable_cpu_data();
  Dtype* prefetch_labelmap = batch->labelmap_.mutable_cpu_data();

  // datum scales
  const int lines_size = lines_.size();
  for (int item_id = 0; item_id < batch_size; ++item_id) {
    // get a blob
    timer.Start();
    CHECK_GT(lines_size, lines_id_);
    cv::Mat cv_img = ReadImageToCVMat(root_folder + lines_[lines_id_].first,
                                    0, 0, is_color);
    cv::Mat cv_gt = ReadImageToCVMat(root_folder + lines_[lines_id_].second,
                                    0, 0, 0);

    CHECK(cv_img.data) << "Could not load " << lines_[lines_id_].first;

    const int height = cv_img.rows;
    const int width = cv_img.cols;
    const int gt_channels = cv_gt.channels();
    const int gt_height = cv_gt.rows;
    const int gt_width = cv_gt.cols;

    CHECK((height == gt_height) && (width == gt_width)) << "GT image size should be equal to true image size";
    CHECK(gt_channels == 1) << "GT image channel number should be 1";
 
    if (new_height > 0 && new_width > 0) {
        cv::resize(cv_img, cv_img, cv::Size(new_width, new_height));
        cv::resize(cv_gt, cv_gt, cv::Size(new_width, new_height), 0, 0, cv::INTER_LINEAR);
    }

    if (!cv_img.data || !cv_gt.data) {
      continue;
    }


    read_time += timer.MicroSeconds();
    timer.Start();
    // Apply transformations (mirror, crop...) to the image
    int offset = batch->data_.offset(item_id);
    int offset_gt = batch->labelmap_.offset(item_id);
    //CHECK(offset == offset_gt) << "fetching should be synchronized";
    this->transformed_data_.set_cpu_data(prefetch_data + offset);
    this->transformed_labelmap_.set_cpu_data(prefetch_labelmap + offset_gt);
    std::pair<int, int> hw_off = this->data_transformer_->LocTransform(cv_img, &(this->transformed_data_));
    
    cv::Mat encoded_gt;
    //regression
    encoded_gt = cv_gt/255;
    //[***Cautions***]
    //One small trick leveraging opencv roundoff feature for **consensus sampling** in Holistically-Nested Edge Detection paper.
    //For general binary edge maps this is okay
    //For 5-subject aggregated edge maps (BSDS), this will abandon weak edge points labeled by only two or less labelers.

    this->data_transformer_->LabelmapTransform(encoded_gt, &(this->transformed_labelmap_), hw_off);
    
    trans_time += timer.MicroSeconds();

    // go to the next iter
    lines_id_++;
    if (lines_id_ >= lines_size) {
      // We have reached the end. Restart from the first.
      DLOG(INFO) << "Restarting data prefetching from start.";
      lines_id_ = 0;
      if (this->layer_param_.image_data_param().shuffle()) {
        ShuffleImages();
      }
    }
  }
  batch_timer.Stop();
  DLOG(INFO) << "Prefetch batch: " << batch_timer.MilliSeconds() << " ms.";
  DLOG(INFO) << "     Read time: " << read_time / 1000 << " ms.";
  DLOG(INFO) << "Transform time: " << trans_time / 1000 << " ms.";
}
Example #16
void SiameseDataLayer<Dtype>::load_batch(Batch<Dtype>* batch) {
  CPUTimer batch_timer;
  batch_timer.Start();
  double read_time = 0;
  double trans_time = 0;
  CPUTimer timer;
  CHECK(batch->data_.count());
  CHECK(this->transformed_data_.count());
  ImageDataParameter image_data_param = this->layer_param_.image_data_param();
  const int batch_size = image_data_param.batch_size();
  const int new_height = image_data_param.new_height();
  const int new_width = image_data_param.new_width();
  const bool is_color = image_data_param.is_color();
  string root_folder = image_data_param.root_folder();  
  const int interpolation = image_data_param.interpolation();
  const int resize_mode = image_data_param.resize_mode();

  // Reshape according to the first image of each batch
  // on single input batches allows for inputs of varying dimension.
  cv::Mat cv_img = ReadImageToCVMat(root_folder + pair_lines_[lines_id_].first,
      new_height, new_width, is_color, interpolation, resize_mode);
  CHECK(cv_img.data) << "Could not load " << pair_lines_[lines_id_].first;
  // Use data_transformer to infer the expected blob shape from a cv_img.
  vector<int> top_shape = this->data_transformer_->InferBlobShape(cv_img);
  this->transformed_data_.Reshape(top_shape);
  // Reshape batch according to the batch_size.
  top_shape[0] = batch_size;
  batch->data_.Reshape(top_shape);

  Dtype* prefetch_data = batch->data_.mutable_cpu_data();
  Dtype* prefetch_label = batch->label_.mutable_cpu_data();

  // datum scales
  const int lines_size = pair_lines_.size();
  for (int item_id = 0; item_id < batch_size; ++item_id) {
    // get a blob
    timer.Start();
    CHECK_GT(lines_size, lines_id_);
    cv::Mat cv_img = ReadImageToCVMat(root_folder + pair_lines_[lines_id_].first,
        new_height, new_width, is_color, interpolation, resize_mode);
    CHECK(cv_img.data) << "Could not load " << pair_lines_[lines_id_].first;
    read_time += timer.MicroSeconds();
    timer.Start();
    //cv::imwrite("aa.jpg", cv_img);
    // Apply transformations (mirror, crop...) to the image
    int offset = batch->data_.offset(item_id);
    this->transformed_data_.set_cpu_data(prefetch_data + offset);
    this->data_transformer_->Transform(cv_img, &(this->transformed_data_));
    trans_time += timer.MicroSeconds();

    prefetch_label[item_id] = pair_lines_[lines_id_].second;
    // go to the next iter
    lines_id_++;
    if (lines_id_ >= lines_size) {
      // We have reached the end. Restart from the first.
      DLOG(INFO) << "Restarting data prefetching from start.";
      lines_id_ = 0;
      if (this->layer_param_.image_data_param().shuffle()) {
        ShuffleImages();
      }
    }
  }
  batch_timer.Stop();
  DLOG(INFO) << "Prefetch batch: " << batch_timer.MilliSeconds() << " ms.";
  DLOG(INFO) << "     Read time: " << read_time / 1000 << " ms.";
  DLOG(INFO) << "Transform time: " << trans_time / 1000 << " ms.";
}
Example #17
void ImageDataLayer<Dtype>::load_batch(Batch<Dtype>* batch) {
  CPUTimer batch_timer;
  batch_timer.Start();
  double read_time = 0;
  double trans_time = 0;
  CPUTimer timer;
  CHECK(batch->data_.count());
  CHECK(this->transformed_data_.count());
  ImageDataParameter image_data_param = this->layer_param_.image_data_param();
  const int batch_size = image_data_param.batch_size();
  const int new_height = image_data_param.new_height();
  const int new_width = image_data_param.new_width();

  const int min_height = image_data_param.min_height();
  const int min_width  = image_data_param.min_width();

  const bool is_color = image_data_param.is_color();
  string root_folder = image_data_param.root_folder();

  // Reshape according to the first image of each batch
  // on single input batches allows for inputs of varying dimension.
  cv::Mat cv_img = ReadImageToCVMat(root_folder + lines_[lines_id_].first,
      new_height, new_width, is_color, min_height, min_width);
  CHECK(cv_img.data) << "Could not load " << lines_[lines_id_].first;
  // Use data_transformer to infer the expected blob shape from a cv_img.
  vector<int> top_shape = this->data_transformer_->InferBlobShape(cv_img);
  this->transformed_data_.Reshape(top_shape);
  // Reshape batch according to the batch_size.
  top_shape[0] = batch_size;
  batch->data_.Reshape(top_shape);

  Dtype* prefetch_data = batch->data_.mutable_cpu_data();
  Dtype* prefetch_label = batch->label_.mutable_cpu_data();

  // datum scales
  const int lines_size = lines_.size();

#ifdef _OPENMP
  #pragma omp parallel if (batch_size > 1)
  #pragma omp single nowait
#endif
  for (int item_id = 0; item_id < batch_size; ++item_id)  {
    // get a blob
    timer.Start();
    CHECK_GT(lines_size, lines_id_);
#ifndef _OPENMP
    cv::Mat cv_img = ReadImageToCVMat(root_folder + lines_[lines_id_].first,
        new_height, new_width, is_color, min_height, min_width);
    CHECK(cv_img.data) << "Could not load " << lines_[lines_id_].first;
    read_time += timer.MicroSeconds();
    timer.Start();
// Apply transformations (mirror, crop...) to the image

    int offset = batch->data_.offset(item_id);
    this->transformed_data_.set_cpu_data(prefetch_data + offset);
    this->data_transformer_->Transform(cv_img, &(this->transformed_data_));
    trans_time += timer.MicroSeconds();
#else
    read_time = 0;
    trans_time = 0;

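    // Pre-compute this item's offset, file name and transform random numbers
    // on the dispatching thread, then hand the read + transform work to an
    // OpenMP task (captured via firstprivate).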
    int offset = batch->data_.offset(item_id);
    std::string img_file_name = lines_[lines_id_].first;
    PreclcRandomNumbers precalculated_rand_numbers;
    this->data_transformer_->GenerateRandNumbers(precalculated_rand_numbers);
    #pragma omp task firstprivate(offset, img_file_name, \
                                                    precalculated_rand_numbers)
    {
        cv::Mat cv_img = ReadImageToCVMat(root_folder + img_file_name,
            new_height, new_width, is_color);
        CHECK(cv_img.data) << "Could not load " << img_file_name;

        Blob<Dtype> tmp_data;
        tmp_data.Reshape(top_shape);
        tmp_data.set_cpu_data(prefetch_data + offset);
        this->data_transformer_->Transform(cv_img, &tmp_data, 
                                                  precalculated_rand_numbers);
    }
#endif

    prefetch_label[item_id] = lines_[lines_id_].second;
    // go to the next iter
    lines_id_++;
    if (lines_id_ >= lines_size) {
      // We have reached the end. Restart from the first.
      DLOG(INFO) << "Restarting data prefetching from start.";
      lines_id_ = 0;
      if (this->layer_param_.image_data_param().shuffle()) {
        ShuffleImages();
      }
    }
  }

  batch_timer.Stop();
  DLOG(INFO) << "Prefetch batch: " << batch_timer.MilliSeconds() << " ms.";
  DLOG(INFO) << "     Read time: " << read_time / 1000 << " ms.";
  DLOG(INFO) << "Transform time: " << trans_time / 1000 << " ms.";
}
Example #18
void DataDrivingLayer<Dtype>::InternalThreadEntry() {
  CPUTimer batch_timer;
  batch_timer.Start();
  double read_time = 0;
  double trans_time = 0;
  CPUTimer timer;
  CHECK(this->prefetch_data_.count());

  Datum datum;
  Dtype* top_data = this->prefetch_data_.mutable_cpu_data();
  Dtype* top_label = this->prefetch_label_.mutable_cpu_data();

  // datum scales
  const int size = resize_height*resize_width*3;
  const Dtype* mean = this->data_mean_.mutable_cpu_data();

  string value;
  const int kMaxKeyLength = 256;
  char key_cstr[kMaxKeyLength];
  int key;

  const int batch_size = this->layer_param_.data_driving_param().batch_size();

  for (int item_id = 0; item_id < batch_size; ++item_id) {

      timer.Start();
      // get a blob

      key=random(484815)+1;  // MUST be changed according to the size of the training set

      snprintf(key_cstr, kMaxKeyLength, "%08d", key);
      db_->Get(leveldb::ReadOptions(), string(key_cstr), &value);
      datum.ParseFromString(value);
      const string& data = datum.data();

      read_time += timer.MicroSeconds();
      timer.Start();

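      // Mean-subtract the raw image bytes while copying them into the data blob.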
      for (int j = 0; j < size; ++j) {
         Dtype datum_element = static_cast<Dtype>(static_cast<uint8_t>(data[j]));
         top_data[item_id * size + j] = (datum_element - mean[j]);
      }

      for (int j = 0; j < para_dim; ++j) { 
         top_label[item_id*para_dim+j] = datum.float_data(j); 
      }

      trans_time += timer.MicroSeconds();
/*
      for (int h = 0; h < resize_height; ++h) {
         for (int w = 0; w < resize_width; ++w) {
            leveldbTrain->imageData[(h*resize_width+w)*3+0]=(uint8_t)data[h*resize_width+w];
            leveldbTrain->imageData[(h*resize_width+w)*3+1]=(uint8_t)data[resize_height*resize_width+h*resize_width+w];
            leveldbTrain->imageData[(h*resize_width+w)*3+2]=(uint8_t)data[resize_height*resize_width*2+h*resize_width+w];

            //leveldbTrain->imageData[(h*resize_width+w)*3+0]=(uint8_t)top_data[item_id * size+h*resize_width+w];
            //leveldbTrain->imageData[(h*resize_width+w)*3+1]=(uint8_t)top_data[item_id * size+resize_height*resize_width+h*resize_width+w];
            //leveldbTrain->imageData[(h*resize_width+w)*3+2]=(uint8_t)top_data[item_id * size+resize_height*resize_width*2+h*resize_width+w];
          }
      }
      cvShowImage("Image from leveldb", leveldbTrain);
      cvWaitKey( 1 );
*/
  }
  batch_timer.Stop();
  DLOG(INFO) << "Prefetch batch: " << batch_timer.MilliSeconds() << " ms.";
  DLOG(INFO) << "     Read time: " << read_time / 1000 << " ms.";
  DLOG(INFO) << "Transform time: " << trans_time / 1000 << " ms.";
}
Example #19
void FloDataLayer<Dtype>::load_batch(Batch<Dtype>* batch) {
  CPUTimer batch_timer;
  batch_timer.Start();
  double read_time = 0;
  double trans_time = 0;
  CPUTimer timer;
  CHECK(batch->data_.count());
  CHECK(this->transformed_data_.count());
  ImageDataParameter image_data_param = this->layer_param_.image_data_param();
  const int batch_size = image_data_param.batch_size();
  string root_folder = image_data_param.root_folder();

  // Reshape according to the first image of each batch
  // on single input batches allows for inputs of varying dimension.
  int xSize, ySize;
  CHECK(readFloFile(root_folder + lines_[lines_id_].first, NULL, xSize, ySize))
      << "Could not load " << lines_[lines_id_].first;
  
  // Build the blob shape manually: 1 x 2 (flow u and v channels) x height x width.
  vector<int> top_shape = vector<int>(4);
  top_shape[0] = 1;
  top_shape[1] = 2;
  top_shape[2] = ySize;
  top_shape[3] = xSize;
  
  //this->transformed_data_.Reshape(top_shape);
  
  // Reshape batch according to the batch_size.
  top_shape[0] = batch_size;
  batch->data_.Reshape(top_shape);

  Dtype* prefetch_data = batch->data_.mutable_cpu_data();

  // datum scales
  const int lines_size = lines_.size();
  for (int item_id = 0; item_id < batch_size; ++item_id) {
    // get a blob
    timer.Start();
    CHECK_GT(lines_size, lines_id_);
    
    read_time += timer.MicroSeconds();
    timer.Start();
    // Apply transformations (mirror, crop...) to the image
    int offset = batch->data_.offset(item_id);
    //this->transformed_data_.set_cpu_data(prefetch_data + offset);
    
    CHECK(readFloFile(root_folder + lines_[lines_id_].first, prefetch_data + offset, xSize, ySize))
        << "Could not load " << lines_[lines_id_].first;
    
    //this->data_transformer_->Transform(cv_img, &(this->transformed_data_));
    
    trans_time += timer.MicroSeconds();

    // go to the next iter
    lines_id_++;
    if (lines_id_ >= lines_size) {
      // We have reached the end. Restart from the first.
      DLOG(INFO) << "Restarting data prefetching from start.";
      lines_id_ = 0;
      if (this->layer_param_.image_data_param().shuffle()) {
        ShuffleImages();
      }
    }
  }
  batch_timer.Stop();
  DLOG(INFO) << "Prefetch batch: " << batch_timer.MilliSeconds() << " ms.";
  DLOG(INFO) << "     Read time: " << read_time / 1000 << " ms.";
  DLOG(INFO) << "Transform time: " << trans_time / 1000 << " ms.";
}
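readFloFile itself is not shown in this example. A minimal sketch of what it might look like, assuming the standard Middlebury .flo layout (a 202021.25f magic float, then int32 width and height, then interleaved (u, v) float pairs per pixel) and assuming the output buffer is filled channel-planar to match the 1 x 2 x ySize x xSize blob shape used above. Passing data == NULL reads only the dimensions, as the first call in load_batch expects.

#include <cstdio>
#include <string>
#include <vector>

// Sketch only: not the project's readFloFile. Returns false on any
// read/format error. When data != NULL it writes the u plane followed by
// the v plane, de-interleaving the per-pixel pairs.
template <typename Dtype>
bool readFloFileSketch(const std::string& filename, Dtype* data,
                       int& xSize, int& ySize) {
  FILE* fp = std::fopen(filename.c_str(), "rb");
  if (!fp) return false;
  float magic = 0.f;
  bool ok = std::fread(&magic, sizeof(float), 1, fp) == 1 &&
            magic == 202021.25f &&
            std::fread(&xSize, sizeof(int), 1, fp) == 1 &&
            std::fread(&ySize, sizeof(int), 1, fp) == 1;
  if (ok && data != NULL) {
    const int n = xSize * ySize;
    std::vector<float> uv(2 * n);
    ok = std::fread(uv.data(), sizeof(float), uv.size(), fp) == uv.size();
    for (int i = 0; ok && i < n; ++i) {
      data[i]     = static_cast<Dtype>(uv[2 * i]);      // u plane
      data[n + i] = static_cast<Dtype>(uv[2 * i + 1]);  // v plane
    }
  }
  std::fclose(fp);
  return ok;
}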
Example #20
void AnnotatedDataLayer<Dtype>::load_batch(Batch<Dtype>* batch) {
  CPUTimer batch_timer;
  batch_timer.Start();
  double read_time = 0;
  double trans_time = 0;
  CPUTimer timer;
  CHECK(batch->data_.count());
  CHECK(this->transformed_data_.count());

  // Reshape according to the first anno_datum of each batch
  // on single input batches allows for inputs of varying dimension.
  const int batch_size = this->layer_param_.data_param().batch_size();
  const AnnotatedDataParameter& anno_data_param =
      this->layer_param_.annotated_data_param();
  const TransformationParameter& transform_param =
    this->layer_param_.transform_param();

  AnnotatedDatum anno_datum;
  //AnnotatedDatum& anno_datum = *(reader_.full().peek());
  // Use data_transformer to infer the expected blob shape from anno_datum.
  //vector<int> top_shape =
  //    this->data_transformer_->InferBlobShape(anno_datum.datum());
  //this->transformed_data_.Reshape(top_shape);
  // Reshape batch according to the batch_size.
  //top_shape[0] = batch_size;
  //batch->data_.Reshape(top_shape);

  //Dtype* top_data = batch->data_.mutable_cpu_data();
  //Dtype* top_label = NULL;  // suppress warnings about uninitialized variables
  //if (this->output_labels_ && !has_anno_type_) {
  //  top_label = batch->label_.mutable_cpu_data();
  //}

  // Store transformed annotation.
  map<int, vector<AnnotationGroup> > all_anno;
  int num_bboxes = 0;

  for (int item_id = 0; item_id < batch_size; ++item_id) {
    timer.Start();
    // get an anno_datum
    //AnnotatedDatum& anno_datum = *(reader_.full().pop("Waiting for data"));
    while (Skip()) {
      Next();
    }
    anno_datum.ParseFromString(cursor_->value());
    read_time += timer.MicroSeconds();
    //timer.Start();
    if (item_id == 0) {
      // Reshape according to the first datum of each batch
      // on single input batches allows for inputs of varying dimension.
      // Use data_transformer to infer the expected blob shape from datum.
      vector<int> top_shape = this->data_transformer_->InferBlobShape(anno_datum.datum());
      this->transformed_data_.Reshape(top_shape);
      // Reshape batch according to the batch_size.
      top_shape[0] = batch_size;
      batch->data_.Reshape(top_shape);
    }
    AnnotatedDatum distort_datum;
    AnnotatedDatum* expand_datum = NULL;
    if (transform_param.has_distort_param()) {
      distort_datum.CopyFrom(anno_datum);
      this->data_transformer_->DistortImage(anno_datum.datum(),
                                            distort_datum.mutable_datum());
      if (transform_param.has_expand_param()) {
        expand_datum = new AnnotatedDatum();
        this->data_transformer_->ExpandImage(distort_datum, expand_datum);
      } else {
        expand_datum = &distort_datum;
      }
    } else {
      if (transform_param.has_expand_param()) {
        expand_datum = new AnnotatedDatum();
        this->data_transformer_->ExpandImage(anno_datum, expand_datum);
      } else {
        expand_datum = &anno_datum;
      }
    }
    AnnotatedDatum* sampled_datum = NULL;
    bool has_sampled = false;
    if (batch_samplers_.size() > 0) {
      // Generate sampled bboxes from expand_datum.
      vector<NormalizedBBox> sampled_bboxes;
      GenerateBatchSamples(*expand_datum, batch_samplers_, &sampled_bboxes);
      if (sampled_bboxes.size() > 0) {
        // Randomly pick a sampled bbox and crop the expand_datum.
        int rand_idx = caffe_rng_rand() % sampled_bboxes.size();
        sampled_datum = new AnnotatedDatum();
        this->data_transformer_->CropImage(*expand_datum,
                                           sampled_bboxes[rand_idx],
                                           sampled_datum);
        has_sampled = true;
      } else {
        sampled_datum = expand_datum;
      }
    } else {
      sampled_datum = expand_datum;
    }
    CHECK(sampled_datum != NULL);
    timer.Start();
    vector<int> top_shape =
        this->data_transformer_->InferBlobShape(anno_datum.datum());
    vector<int> shape =
        this->data_transformer_->InferBlobShape(sampled_datum->datum());
    if (transform_param.has_resize_param()) {
      if (transform_param.resize_param().resize_mode() ==
          ResizeParameter_Resize_mode_FIT_SMALL_SIZE) {
        this->transformed_data_.Reshape(shape);
        batch->data_.Reshape(shape);
        //top_data = batch->data_.mutable_cpu_data();
      } else {
        CHECK(std::equal(top_shape.begin() + 1, top_shape.begin() + 4,
              shape.begin() + 1));
      }
    } else {
      CHECK(std::equal(top_shape.begin() + 1, top_shape.begin() + 4,
            shape.begin() + 1));
    }
    // Apply data transformations (mirror, scale, crop...)
    timer.Start();
    int offset = batch->data_.offset(item_id);
    Dtype* top_data = batch->data_.mutable_cpu_data();
    this->transformed_data_.set_cpu_data(top_data + offset);
    vector<AnnotationGroup> transformed_anno_vec;
    if (this->output_labels_) {
      if (has_anno_type_) {
        // Make sure all data have same annotation type.
        CHECK(sampled_datum->has_type()) << "Some datum misses AnnotationType.";
        if (anno_data_param.has_anno_type()) {
          sampled_datum->set_type(anno_type_);
        } else {
          CHECK_EQ(anno_type_, sampled_datum->type()) <<
              "Different AnnotationType.";
        }
        // Transform datum and annotation_group at the same time
        transformed_anno_vec.clear();
        this->data_transformer_->Transform(*sampled_datum,
                                           &(this->transformed_data_),
                                           &transformed_anno_vec);
        if (anno_type_ == AnnotatedDatum_AnnotationType_BBOX) {
          // Count the number of bboxes.
          for (int g = 0; g < transformed_anno_vec.size(); ++g) {
            num_bboxes += transformed_anno_vec[g].annotation_size();
          }
        } else {
          LOG(FATAL) << "Unknown annotation type.";
        }
        all_anno[item_id] = transformed_anno_vec;
      } else {
        this->data_transformer_->Transform(sampled_datum->datum(),
                                           &(this->transformed_data_));
        // Otherwise, store the label from datum.
        CHECK(sampled_datum->datum().has_label()) << "Cannot find any label.";
        Dtype* top_label = batch->label_.mutable_cpu_data();
        top_label[item_id] = sampled_datum->datum().label();
      }
    } else {
      this->data_transformer_->Transform(sampled_datum->datum(),
                                         &(this->transformed_data_));
    }
    // clear memory
    if (has_sampled) {
      delete sampled_datum;
    }
    if (transform_param.has_expand_param()) {
      delete expand_datum;
    }
    trans_time += timer.MicroSeconds();

    //reader_.free().push(const_cast<AnnotatedDatum*>(&anno_datum));
    Next();
  }

  // Store "rich" annotation if needed.
  if (this->output_labels_ && has_anno_type_) {
    vector<int> label_shape(4);
    if (anno_type_ == AnnotatedDatum_AnnotationType_BBOX) {
      label_shape[0] = 1;
      label_shape[1] = 1;
      label_shape[3] = 8;
      if (num_bboxes == 0) {
        // Store all -1 in the label.
        label_shape[2] = 1;
        batch->label_.Reshape(label_shape);
        caffe_set<Dtype>(8, -1, batch->label_.mutable_cpu_data());
      } else {
        // Reshape the label and store the annotation.
        label_shape[2] = num_bboxes;
        batch->label_.Reshape(label_shape);
        Dtype* top_label = batch->label_.mutable_cpu_data();
        int idx = 0;
        for (int item_id = 0; item_id < batch_size; ++item_id) {
          const vector<AnnotationGroup>& anno_vec = all_anno[item_id];
          for (int g = 0; g < anno_vec.size(); ++g) {
            const AnnotationGroup& anno_group = anno_vec[g];
            for (int a = 0; a < anno_group.annotation_size(); ++a) {
              const Annotation& anno = anno_group.annotation(a);
              const NormalizedBBox& bbox = anno.bbox();
              top_label[idx++] = item_id;
              top_label[idx++] = anno_group.group_label();
              top_label[idx++] = anno.instance_id();
              top_label[idx++] = bbox.xmin();
              top_label[idx++] = bbox.ymin();
              top_label[idx++] = bbox.xmax();
              top_label[idx++] = bbox.ymax();
              top_label[idx++] = bbox.difficult();
            }
          }
        }
      }
    } else {
      LOG(FATAL) << "Unknown annotation type.";
    }
  }
  timer.Stop();
  batch_timer.Stop();
  DLOG(INFO) << "Prefetch batch: " << batch_timer.MilliSeconds() << " ms.";
  DLOG(INFO) << "     Read time: " << read_time / 1000 << " ms.";
  DLOG(INFO) << "Transform time: " << trans_time / 1000 << " ms.";
}
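For reference, each row of the 1 x 1 x num_bboxes x 8 label blob built above holds [item_id, group_label, instance_id, xmin, ymin, xmax, ymax, difficult]; when num_bboxes is zero a single all -1 row is stored instead. A minimal sketch of how a consumer might decode that flattened blob (the struct and helper are illustrative, not part of Caffe):

#include <vector>

// Illustrative decode of the flattened SSD-style annotation blob.
struct DecodedBox {
  int item_id;      // index of the image within the batch
  int label;        // group (class) label
  int instance_id;  // instance id within the group
  float xmin, ymin, xmax, ymax;  // normalized [0, 1] corners
  bool difficult;
};

template <typename Dtype>
std::vector<DecodedBox> DecodeAnnotationBlob(const Dtype* top_label,
                                             int num_bboxes) {
  std::vector<DecodedBox> boxes;
  boxes.reserve(num_bboxes);
  for (int i = 0; i < num_bboxes; ++i) {
    const Dtype* row = top_label + i * 8;
    if (row[0] < 0) continue;  // the all -1 placeholder row
    DecodedBox b;
    b.item_id = static_cast<int>(row[0]);
    b.label = static_cast<int>(row[1]);
    b.instance_id = static_cast<int>(row[2]);
    b.xmin = row[3]; b.ymin = row[4];
    b.xmax = row[5]; b.ymax = row[6];
    b.difficult = row[7] != Dtype(0);
    boxes.push_back(b);
  }
  return boxes;
}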
void VideoDataLayer<Dtype>::InternalThreadEntry(){

	CHECK(this->prefetch_data_.count());
	Dtype* top_data = this->prefetch_data_.mutable_cpu_data();
	Dtype* top_label = this->prefetch_label_.mutable_cpu_data();
	VideoDataParameter video_data_param = this->layer_param_.video_data_param();
	const int thread_num = Caffe::getThreadNum();
	const int batch_size = video_data_param.batch_size() / thread_num;
	const int new_height = video_data_param.new_height();
	const int new_width = video_data_param.new_width();
	CHECK(new_height > 0 && new_width > 0) << "new_height and new_width must be set";
	const int new_length = video_data_param.new_length();
	const int num_segments = video_data_param.num_segments();
	string root_folder = video_data_param.root_folder();
	const bool flow_is_color = this->layer_param_.video_data_param().flow_is_color();
	const int lines_size = lines_.size();

	CHECK(lines_size >= batch_size) << "too few samples; this will cause problems in parallel reading";

	CPUTimer timer;
	timer.Start();

	CHECK((batch_size % num_segments) == 0) << "(batch_size % num_segments) != 0";
    const int batch_video = batch_size / num_segments;
	omp_set_num_threads(8);
#pragma omp parallel for
	for (int item_id = 0; item_id < batch_video; ++item_id){
		Datum datum;
		const int lines_id_loc = (lines_id_ + item_id) % lines_size;
		CHECK_GT(lines_size, lines_id_loc);

		const int len_vid = lines_duration_[lines_id_loc];
		CHECK_GT(len_vid, new_length);

		vector<int> offsets;		
		for (int i = 0; i < num_segments; ++i){
			if (this->phase_==TRAIN){
				CHECK(num_segments == 1);
				caffe::rng_t* frame_rng = static_cast<caffe::rng_t*>(frame_prefetch_rng_->generator());				
				int offset = (*frame_rng)() % (len_vid - new_length + 1);
				offsets.push_back(offset);
			} else{
				Linspace(0, len_vid-new_length, num_segments, offsets);
			}
		}
//		CPUTimer timer;
//		timer.Start();
		if (this->layer_param_.video_data_param().modality() == VideoDataParameter_Modality_FLOW){
			if(!ReadSegmentFlowToDatum(root_folder + lines_[lines_id_loc].first, lines_[lines_id_loc].second, offsets, new_height, new_width, new_length, &datum, flow_is_color))				
				continue;
		} else{		
			if(!ReadSegmentRGBToDatum(root_folder + lines_[lines_id_loc].first, lines_[lines_id_loc].second, offsets, new_height, new_width, new_length, &datum, true))
				continue;
		}
//		LOG(INFO) << "Read data cost " << timer.MicroSeconds() / 1000 << " ms";
		int offset1 = this->prefetch_data_.offset(num_segments * item_id);
		
//		CPUTimer timer2;
//		timer2.Start();
		Blob<Dtype> transformed_data_loc;
		vector<int> top_shape = this->data_transformer_->InferBlobShape(datum);
		transformed_data_loc.Reshape(top_shape);
		transformed_data_loc.set_cpu_data(top_data + offset1);
		const int chn_flow_single = flow_is_color ? 3 : 1;
		this->data_transformer_->Transform(datum, &(transformed_data_loc), chn_flow_single);
//		LOG(INFO) << "Transform cost " << timer.MicroSeconds() / 1000 << " ms";

		for (int kk = num_segments * item_id; kk < num_segments * (item_id + 1); kk++)
                   top_label[kk] = lines_[lines_id_loc].second;
	}

	//next iteration
	lines_id_ += batch_video;
	if (lines_id_ >= lines_size) {
		DLOG(INFO) << "Restarting data prefetching from start.";
		lines_id_ = 0;
		if(this->layer_param_.video_data_param().shuffle()){
			ShuffleVideos();
		}
	}
}
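The test-phase branch above relies on a Linspace helper to pick num_segments evenly spaced start frames; its implementation is not shown here. A plausible sketch of its behavior (an assumption about the real helper) that clears and refills the offsets vector, which also makes the repeated call inside the segment loop harmless:

#include <vector>

// Sketch of the assumed Linspace behavior: num evenly spaced integer start
// frames in [lo, hi]; a single segment starts at lo.
void LinspaceSketch(int lo, int hi, int num, std::vector<int>& offsets) {
  offsets.clear();
  if (num <= 1) {
    offsets.push_back(lo);
    return;
  }
  const double step = static_cast<double>(hi - lo) / (num - 1);
  for (int i = 0; i < num; ++i) {
    offsets.push_back(lo + static_cast<int>(i * step + 0.5));
  }
}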
void MultiLabelImageDataLayer<Dtype>::InternalThreadEntry() {

  CPUTimer batch_timer;
  batch_timer.Start();
  double read_time = 0;
  double trans_time = 0;
  CPUTimer timer;
  CHECK(this->prefetch_data_.count());
  CHECK(this->transformed_data_.count());
  ImageDataParameter image_data_param = this->layer_param_.image_data_param();
  const int batch_size = image_data_param.batch_size();
  const int new_height = image_data_param.new_height();
  const int new_width = image_data_param.new_width();
  const bool is_color = image_data_param.is_color();
  string root_folder = image_data_param.root_folder();
  int max_label_num_ = this->layer_param_.image_data_param().max_label();

  // Reshape according to the first image of each batch
  // on single input batches allows for inputs of varying dimension.
  cv::Mat cv_img = ReadImageToCVMat(root_folder + lines_[lines_id_],
      new_height, new_width, is_color);
  // Use data_transformer to infer the expected blob shape from a cv_img.
  vector<int> top_shape = this->data_transformer_->InferBlobShape(cv_img);
  this->transformed_data_.Reshape(top_shape);
  // Reshape prefetch_data according to the batch_size.
  top_shape[0] = batch_size;
  this->prefetch_data_.Reshape(top_shape);

  Dtype* prefetch_data = this->prefetch_data_.mutable_cpu_data();
  Dtype* prefetch_label = this->prefetch_label_.mutable_cpu_data();

  // datum scales
  const int lines_size = lines_.size();
  for (int item_id = 0; item_id < batch_size; ++item_id) {
    // get a blob
    timer.Start();
    CHECK_GT(lines_size, lines_id_);
    cv::Mat cv_img = ReadImageToCVMat(root_folder + lines_[lines_id_],
        new_height, new_width, is_color);
    CHECK(cv_img.data) << "Could not load " << lines_[lines_id_];
    read_time += timer.MicroSeconds();
    timer.Start();
    // Apply transformations (mirror, crop...) to the image
    int offset = this->prefetch_data_.offset(item_id);
    this->transformed_data_.set_cpu_data(prefetch_data + offset);
    this->data_transformer_->Transform(cv_img, &(this->transformed_data_));
    trans_time += timer.MicroSeconds();
    int t_counter = 0;
    for(t_counter = 0; t_counter < std::min((int)labels_[item_id].size(), max_label_num_); t_counter++){
      prefetch_label[item_id * max_label_num_ +  t_counter] = (Dtype)labels_[item_id][t_counter];
    }
    if (t_counter < max_label_num_) {
      // Mark the end of the valid labels. When the row is already full the
      // terminator is omitted so it cannot spill into the next item's slots.
      prefetch_label[item_id * max_label_num_ + t_counter] = -1;
    }
    // go to the next iter
    lines_id_++;
    if (lines_id_ >= lines_size) {
      // We have reached the end. Restart from the first.
      DLOG(INFO) << "Restarting data prefetching from start.";
      lines_id_ = 0;
      if (this->layer_param_.image_data_param().shuffle()) {
        ShuffleImages();
      }
    }
  }
  batch_timer.Stop();
  DLOG(INFO) << "Prefetch batch: " << batch_timer.MilliSeconds() << " ms.";
  DLOG(INFO) << "     Read time: " << read_time / 1000 << " ms.";
  DLOG(INFO) << "Transform time: " << trans_time / 1000 << " ms.";
}
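The loop above packs up to max_label_num_ labels per image and marks the end of the valid labels with -1. A minimal sketch of the corresponding read on the consumer side, assuming each item owns max_label_num_ consecutive slots in the label blob (the helper and the stride argument are illustrative):

#include <vector>

// Read the -1-terminated label list for one item of the batch.
template <typename Dtype>
std::vector<int> ReadItemLabels(const Dtype* prefetch_label, int item_id,
                                int stride /* = max_label_num_ */) {
  std::vector<int> labels;
  const Dtype* row = prefetch_label + item_id * stride;
  for (int i = 0; i < stride; ++i) {
    if (row[i] < 0) break;  // -1 marks the end of the valid labels
    labels.push_back(static_cast<int>(row[i]));
  }
  return labels;
}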
void AnnotatedDataLayer<Dtype>::load_batch(Batch<Dtype>* batch) {
  CPUTimer batch_timer;
  batch_timer.Start();
  double read_time = 0;
  double trans_time = 0;
  CPUTimer timer;
  CHECK(batch->data_.count());
  CHECK(this->transformed_data_.count());

  // Reshape according to the first anno_datum of each batch
  // on single input batches allows for inputs of varying dimension.
  const int batch_size = this->layer_param_.data_param().batch_size();
  const AnnotatedDataParameter& anno_data_param =
      this->layer_param_.annotated_data_param();
  const TransformationParameter& transform_param =
    this->layer_param_.transform_param();
  AnnotatedDatum& anno_datum = *(reader_.full().peek());
  // Use data_transformer to infer the expected blob shape from anno_datum.
  vector<int> top_shape =
      this->data_transformer_->InferBlobShape(anno_datum.datum());
  this->transformed_data_.Reshape(top_shape);
  // Reshape batch according to the batch_size.
  top_shape[0] = batch_size;
  batch->data_.Reshape(top_shape);  // image data blob

  Dtype* top_data = batch->data_.mutable_cpu_data();
  Dtype* top_label = NULL;  // suppress warnings about uninitialized variables
  if (this->output_labels_ && !has_anno_type_) {
    top_label = batch->label_.mutable_cpu_data();
  }

  // Store transformed annotation.
  map<int, vector<AnnotationGroup> > all_anno;
  int num_bboxes = 0;

  // Data preprocessing: loop over the items in the batch.
  for (int item_id = 0; item_id < batch_size; ++item_id) {
    timer.Start();
    // get an anno_datum
    AnnotatedDatum& anno_datum = *(reader_.full().pop("Waiting for data"));
    read_time += timer.MicroSeconds();
    timer.Start();
    AnnotatedDatum distort_datum;
    AnnotatedDatum* expand_datum = NULL;  // datum after the expand step
    // distort: photometric distortion of the image's HSL (hue, saturation,
    // lightness) and contrast; random_order_prob reshuffles the RGB channel
    // order. expand: zero-pixel padding around the image.
    if (transform_param.has_distort_param()) {
      distort_datum.CopyFrom(anno_datum);  // copy anno_datum into distort_datum
      // apply the photometric distortion
      this->data_transformer_->DistortImage(anno_datum.datum(),
                                            distort_datum.mutable_datum());
      if (transform_param.has_expand_param()) {
        expand_datum = new AnnotatedDatum();
        // Pad the distorted image with zero pixels; the bbox labels change,
        // so the [0,1] top-left and bottom-right corners are recomputed with
        // the padded canvas's top-left corner as the new origin.
        this->data_transformer_->ExpandImage(distort_datum, expand_datum);
      } else {
        expand_datum = &distort_datum;
      }
    } else {
      // No distortion: expand (zero-pad) anno_datum directly; the bbox labels
      // are recomputed relative to the padded canvas in the same way.
      if (transform_param.has_expand_param()) {
        expand_datum = new AnnotatedDatum();
        this->data_transformer_->ExpandImage(anno_datum, expand_datum);
      } else {
        expand_datum = &anno_datum;
      }
    }
	
    // Label cropping: ProjectBBox first projects the original-coordinate
    // labels into the cropped image's coordinate system, then ClipBBox clips
    // them to [0, 1].
    AnnotatedDatum* sampled_datum = NULL;
    bool has_sampled = false;
    if (batch_samplers_.size() > 0) {
      // Generate sampled bboxes from expand_datum.
      vector<NormalizedBBox> sampled_bboxes;
      GenerateBatchSamples(*expand_datum, batch_samplers_, &sampled_bboxes);
      if (sampled_bboxes.size() > 0) {
        // Randomly pick a sampled bbox and crop the expand_datum.
        int rand_idx = caffe_rng_rand() % sampled_bboxes.size();
        sampled_datum = new AnnotatedDatum();
        this->data_transformer_->CropImage(*expand_datum,
                                           sampled_bboxes[rand_idx],
                                           sampled_datum);
        has_sampled = true;
      } else {
        sampled_datum = expand_datum;
      }
    } else {
      sampled_datum = expand_datum;
    }
	
    CHECK(sampled_datum != NULL);
    timer.Start();
    vector<int> shape =
        this->data_transformer_->InferBlobShape(sampled_datum->datum());
		
    // Resize: finally scale the image to a fixed size (e.g. 300x300 or
    // 416x416); the label boxes are simply rescaled linearly as well.
    if (transform_param.has_resize_param()) {
      if (transform_param.resize_param().resize_mode() ==
          ResizeParameter_Resize_mode_FIT_SMALL_SIZE) {
        this->transformed_data_.Reshape(shape);
        batch->data_.Reshape(shape);
        top_data = batch->data_.mutable_cpu_data();
      } else {
        CHECK(std::equal(top_shape.begin() + 1, top_shape.begin() + 4,
              shape.begin() + 1));
      }
    } else {
      CHECK(std::equal(top_shape.begin() + 1, top_shape.begin() + 4,
            shape.begin() + 1));
    }
	
    // Apply data transformations (mirror, scale, crop...) to the image.
    int offset = batch->data_.offset(item_id);
    this->transformed_data_.set_cpu_data(top_data + offset);
	
    vector<AnnotationGroup> transformed_anno_vec;
    if (this->output_labels_) {
      if (has_anno_type_) {
        // Make sure all data have same annotation type.
        CHECK(sampled_datum->has_type()) << "Some datum misses AnnotationType.";
        if (anno_data_param.has_anno_type()) {
          sampled_datum->set_type(anno_type_);
        } else {
          CHECK_EQ(anno_type_, sampled_datum->type()) <<
              "Different AnnotationType.";
        }
        // Transform datum and annotation_group at the same time
        transformed_anno_vec.clear();
        this->data_transformer_->Transform(*sampled_datum,
                                           &(this->transformed_data_),
                                           &transformed_anno_vec);
        if (anno_type_ == AnnotatedDatum_AnnotationType_BBOX) {
          // Count the number of bboxes.
          for (int g = 0; g < transformed_anno_vec.size(); ++g) {
            num_bboxes += transformed_anno_vec[g].annotation_size();
          }
        } else {
          LOG(FATAL) << "Unknown annotation type.";
        }
        all_anno[item_id] = transformed_anno_vec;
      } else {
        this->data_transformer_->Transform(sampled_datum->datum(),
                                           &(this->transformed_data_));
        // Otherwise, store the label from datum.
        CHECK(sampled_datum->datum().has_label()) << "Cannot find any label.";
        top_label[item_id] = sampled_datum->datum().label();
      }
    } else {
      this->data_transformer_->Transform(sampled_datum->datum(),
                                         &(this->transformed_data_));
    }

    // clear memory
    if (has_sampled) {
      delete sampled_datum;
    }
    if (transform_param.has_expand_param()) {
      delete expand_datum;
    }
    trans_time += timer.MicroSeconds();

    reader_.free().push(const_cast<AnnotatedDatum*>(&anno_datum));
  }

// Layout of a detection AnnotatedDatum:
// image data:
//   datum = anno_datum->mutable_datum()
//   datum->set_data();                      // all 8-bit pixel data
//   datum->set_channels(cv_img.channels()); // number of channels
//   datum->set_height(cv_img.rows);         // rows
//   datum->set_width(cv_img.cols);          // columns
//   datum->set_encoded(true);               // encoded flag
// labels:
//   anno_datum->mutable_annotation_group(g)->set_group_label(label)        // class label
//                                          ->add_annotation()->mutable_bbox()  // bbox data
  // Store "rich" annotation if needed.
  if (this->output_labels_ && has_anno_type_) {
    vector<int> label_shape(4);
    if (anno_type_ == AnnotatedDatum_AnnotationType_BBOX) {
      label_shape[0] = 1;
      label_shape[1] = 1;
      label_shape[3] = 8;
      // No bboxes: store a single all -1 label row.
      if (num_bboxes == 0) {
        // Store all -1 in the label.
        label_shape[2] = 1;
        batch->label_.Reshape(label_shape);
        caffe_set<Dtype>(8, -1, batch->label_.mutable_cpu_data());
      } else {
        // Reshape the label and store the annotation.
        label_shape[2] = num_bboxes;  // one row per bbox
        batch->label_.Reshape(label_shape);
        top_label = batch->label_.mutable_cpu_data();
        int idx = 0;
        for (int item_id = 0; item_id < batch_size; ++item_id) {
          const vector<AnnotationGroup>& anno_vec = all_anno[item_id];
          for (int g = 0; g < anno_vec.size(); ++g) {
            const AnnotationGroup& anno_group = anno_vec[g];
            for (int a = 0; a < anno_group.annotation_size(); ++a) {
              const Annotation& anno = anno_group.annotation(a);
              const NormalizedBBox& bbox = anno.bbox();     // bbox data
              top_label[idx++] = item_id;                   // index of the item within the batch
              top_label[idx++] = anno_group.group_label();  // class label id
              top_label[idx++] = anno.instance_id();
              top_label[idx++] = bbox.xmin();               // top-left corner
              top_label[idx++] = bbox.ymin();
              top_label[idx++] = bbox.xmax();               // bottom-right corner
              top_label[idx++] = bbox.ymax();
              top_label[idx++] = bbox.difficult();
            }
          }
        }
      }
    } else {
      LOG(FATAL) << "Unknown annotation type.";
    }
  }
  timer.Stop();
  batch_timer.Stop();
  DLOG(INFO) << "Prefetch batch: " << batch_timer.MilliSeconds() << " ms.";
  DLOG(INFO) << "     Read time: " << read_time / 1000 << " ms.";
  DLOG(INFO) << "Transform time: " << trans_time / 1000 << " ms.";
}
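The comments above describe how expansion (zero-padding) forces the normalized bbox labels to be recomputed relative to the padded canvas. A small sketch of that coordinate math, as an illustration only; it is not the DataTransformer's ExpandImage.

// All coordinates are normalized to [0, 1]. (off_x, off_y) is where the
// original image was placed inside the new_w x new_h zero-padded canvas.
struct BoxSketch { float xmin, ymin, xmax, ymax; };

BoxSketch RemapBoxAfterExpand(const BoxSketch& box,
                              int orig_w, int orig_h,
                              int new_w, int new_h,
                              int off_x, int off_y) {
  BoxSketch out;
  out.xmin = (off_x + box.xmin * orig_w) / new_w;
  out.ymin = (off_y + box.ymin * orig_h) / new_h;
  out.xmax = (off_x + box.xmax * orig_w) / new_w;
  out.ymax = (off_y + box.ymax * orig_h) / new_h;
  return out;
}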
void DenseImageDataLayer<Dtype>::InternalThreadEntry() {
  CPUTimer batch_timer;
  batch_timer.Start();
  double read_time = 0;
  double trans_time = 0;
  CPUTimer timer;
  CHECK(this->prefetch_data_.count());
  CHECK(this->transformed_data_.count());
  DenseImageDataParameter dense_image_data_param = this->layer_param_.dense_image_data_param();
  const int batch_size = dense_image_data_param.batch_size();
  const int new_height = dense_image_data_param.new_height();
  const int new_width = dense_image_data_param.new_width();
  const int crop_height = dense_image_data_param.crop_height();
  const int crop_width  = dense_image_data_param.crop_width();
  const int crop_size = this->layer_param_.transform_param().crop_size();
  const bool is_color = dense_image_data_param.is_color();
  string root_folder = dense_image_data_param.root_folder();

  // Reshape on single input batches for inputs of varying dimension.
  if (batch_size == 1 && crop_size == 0 && new_height == 0 && new_width == 0 && crop_height == 0 && crop_width == 0) {
    cv::Mat cv_img = ReadImageToCVMat(root_folder + lines_[lines_id_].first,
        0, 0, is_color);
    this->prefetch_data_.Reshape(1, cv_img.channels(),
        cv_img.rows, cv_img.cols);
    this->transformed_data_.Reshape(1, cv_img.channels(),
        cv_img.rows, cv_img.cols);
    this->prefetch_label_.Reshape(1, 1, cv_img.rows, cv_img.cols);
    this->transformed_label_.Reshape(1, 1, cv_img.rows, cv_img.cols);
  }
  Dtype* prefetch_data = this->prefetch_data_.mutable_cpu_data();
  Dtype* prefetch_label = this->prefetch_label_.mutable_cpu_data();
  // datum scales
  const int lines_size = lines_.size();
  for (int item_id = 0; item_id < batch_size; ++item_id) {
    // get a blob
    timer.Start();
    CHECK_GT(lines_size, lines_id_);
    cv::Mat cv_img = ReadImageToCVMat(root_folder + lines_[lines_id_].first,
        new_height, new_width, is_color);
    CHECK(cv_img.data) << "Could not load " << lines_[lines_id_].first;
    cv::Mat cv_lab = ReadImageToCVMat(root_folder + lines_[lines_id_].second,
        new_height, new_width, false, true);
    CHECK(cv_lab.data) << "Could not load " << lines_[lines_id_].second;
    read_time += timer.MicroSeconds();
    timer.Start();
    // Apply random horizontal mirror of images
    if (this->layer_param_.dense_image_data_param().mirror()) {
      const bool do_mirror = caffe_rng_rand() % 2;
      if (do_mirror) {
        cv::flip(cv_img,cv_img,1);
        cv::flip(cv_lab,cv_lab,1);
      }
    }
    // Apply crop
    int height = cv_img.rows;
    int width = cv_img.cols;

    int h_off = 0;
    int w_off = 0;
    if (crop_height>0 && crop_width>0) {
      h_off = caffe_rng_rand() % (height - crop_height + 1);
      w_off = caffe_rng_rand() % (width - crop_width + 1);
      cv::Rect myROI(w_off, h_off, crop_width, crop_height);
      cv_img = cv_img(myROI);
      cv_lab = cv_lab(myROI);
    }

    // Apply transformations (mirror, crop...) to the image
    int offset = this->prefetch_data_.offset(item_id);
    this->transformed_data_.set_cpu_data(prefetch_data + offset);
    this->data_transformer_->Transform(cv_img, &(this->transformed_data_));
    // transform label the same way
    int label_offset = this->prefetch_label_.offset(item_id);
    this->transformed_label_.set_cpu_data(prefetch_label + label_offset);

    this->data_transformer_->Transform(cv_lab, &this->transformed_label_, true);
    CHECK(!this->layer_param_.transform_param().mirror() &&
        this->layer_param_.transform_param().crop_size() == 0)
        << "FIXME: Any stochastic transformation will break layer due to "
        << "the need to transform input and label images in the same way";
    trans_time += timer.MicroSeconds();

    // go to the next iter
    lines_id_++;
    if (lines_id_ >= lines_size) {
      // We have reached the end. Restart from the first.
      DLOG(INFO) << "Restarting data prefetching from start.";
      lines_id_ = 0;
      if (this->layer_param_.dense_image_data_param().shuffle()) {
        ShuffleImages();
      }
    }
  }
  batch_timer.Stop();
  DLOG(INFO) << "Prefetch batch: " << batch_timer.MilliSeconds() << " ms.";
  DLOG(INFO) << "     Read time: " << read_time / 1000 << " ms.";
  DLOG(INFO) << "Transform time: " << trans_time / 1000 << " ms.";
}
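DenseImageDataLayer applies its random mirror and crop manually so that the image and its per-pixel label always receive exactly the same transformation, which is why the CHECK above forbids stochastic transforms inside DataTransformer. A minimal sketch that factors this synchronized augmentation into a helper; it is illustrative only and reuses caffe_rng_rand() from Caffe's math_functions header like the original code.

#include <opencv2/core/core.hpp>
#include "caffe/util/math_functions.hpp"  // caffe::caffe_rng_rand()

// Apply the same random flip decision and the same crop window to the
// image and to its label map.
void SyncedMirrorAndCrop(cv::Mat& img, cv::Mat& lab,
                         bool mirror, int crop_height, int crop_width) {
  if (mirror && (caffe::caffe_rng_rand() % 2)) {
    cv::flip(img, img, 1);  // horizontal flip
    cv::flip(lab, lab, 1);  // identical flip for the label
  }
  if (crop_height > 0 && crop_width > 0) {
    const int h_off = caffe::caffe_rng_rand() % (img.rows - crop_height + 1);
    const int w_off = caffe::caffe_rng_rand() % (img.cols - crop_width + 1);
    const cv::Rect roi(w_off, h_off, crop_width, crop_height);
    img = img(roi);  // identical window for both
    lab = lab(roi);
  }
}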
Example #25
void CPMDataLayer<Dtype>::load_batch(Batch<Dtype>* batch) {

  CPUTimer batch_timer;
  batch_timer.Start();
  double deque_time = 0;
  double decod_time = 0;
  double trans_time = 0;
  static int cnt = 0;
  CPUTimer timer;
  CHECK(batch->data_.count());
  CHECK(this->transformed_data_.count());

  // Reshape on single input batches for inputs of varying dimension.
  const int batch_size = this->layer_param_.cpmdata_param().batch_size();
  const int crop_size = this->layer_param_.transform_param().crop_size();
  bool force_color = this->layer_param_.cpmdata_param().force_encoded_color();
  if (batch_size == 1 && crop_size == 0) {
    Datum& datum = *(reader_.full().peek());
    if (datum.encoded()) {
      if (force_color) {
        DecodeDatum(&datum, true);
      } else {
        DecodeDatumNative(&datum);
      }
    }
    batch->data_.Reshape(1, datum.channels(),
        datum.height(), datum.width());
    this->transformed_data_.Reshape(1, datum.channels(),
    datum.height(), datum.width());
  }

  Dtype* top_data = batch->data_.mutable_cpu_data();
  Dtype* top_label = NULL;  // suppress warnings about uninitialized variables

  if (this->output_labels_) {
    top_label = batch->label_.mutable_cpu_data();
  }
  for (int item_id = 0; item_id < batch_size; ++item_id) {
    // get a blob
    timer.Start();
    Datum& datum = *(reader_.full().pop("Waiting for data"));
    deque_time += timer.MicroSeconds();

    timer.Start();
    cv::Mat cv_img;
    if (datum.encoded()) {
      if (force_color) {
        cv_img = DecodeDatumToCVMat(datum, true);
      } else {
        cv_img = DecodeDatumToCVMatNative(datum);
      }
      if (cv_img.channels() != this->transformed_data_.channels()) {
        LOG(WARNING) << "Your dataset contains encoded images with mixed "
        << "channel sizes. Consider adding a 'force_color' flag to the "
        << "model definition, or rebuild your dataset using "
        << "convert_imageset.";
      }
    }
    decod_time += timer.MicroSeconds();

    // Apply data transformations (mirror, scale, crop...)
    timer.Start();
    const int offset_data = batch->data_.offset(item_id);
    const int offset_label = batch->label_.offset(item_id);
    this->transformed_data_.set_cpu_data(top_data + offset_data);
    this->transformed_label_.set_cpu_data(top_label + offset_label);
    if (datum.encoded()) {
      this->data_transformer_->Transform(cv_img, &(this->transformed_data_));
    } else {
      this->data_transformer_->Transform_nv(datum, 
        &(this->transformed_data_),
        &(this->transformed_label_), cnt);
      ++cnt;
    }
    // if (this->output_labels_) {
    //   top_label[item_id] = datum.label();
    // }
    trans_time += timer.MicroSeconds();

    reader_.free().push(const_cast<Datum*>(&datum));
  }
  batch_timer.Stop();

#ifdef BENCHMARK_DATA
  LOG(INFO) << "Prefetch batch: " << batch_timer.MilliSeconds() << " ms.";
  LOG(INFO) << "  Dequeue time: " << deque_time / 1000 << " ms.";
  LOG(INFO) << "   Decode time: " << decod_time / 1000 << " ms.";
  LOG(INFO) << "Transform time: " << trans_time / 1000 << " ms.";
#endif
}
Example #26
void ImageDataLayer<Dtype>::InternalThreadEntry() {
  CPUTimer batch_timer;
  batch_timer.Start();
  double read_time = 0;
  double trans_time = 0;
  CPUTimer timer;
  CHECK(this->prefetch_data_.count());
  CHECK(this->transformed_data_.count());
  ImageDataParameter image_data_param = this->layer_param_.image_data_param();
  const int batch_size = image_data_param.batch_size();
  const int new_height = image_data_param.new_height();
  const int new_width = image_data_param.new_width();
  const int crop_size = this->layer_param_.transform_param().crop_size();
  const bool is_color = image_data_param.is_color();
  string root_folder = image_data_param.root_folder();

  // Reshape on single input batches for inputs of varying dimension.
  if (batch_size == 1 && crop_size == 0 && new_height == 0 && new_width == 0) {
    cv::Mat cv_img = ReadImageToCVMat(root_folder + lines_[lines_id_].first,
        0, 0, is_color);
    this->prefetch_data_.Reshape(1, cv_img.channels(),
        cv_img.rows, cv_img.cols);
    this->transformed_data_.Reshape(1, cv_img.channels(),
        cv_img.rows, cv_img.cols);
  }

  Dtype* prefetch_data = this->prefetch_data_.mutable_cpu_data();
  Dtype* prefetch_label = this->prefetch_label_.mutable_cpu_data();

  // datum scales
  const int lines_size = lines_.size();
  int label_dim = this->layer_param_.image_data_param().label_dim();
  for (int item_id = 0; item_id < batch_size; ++item_id) {
    // get a blob
    timer.Start();
    CHECK_GT(lines_size, lines_id_);
    cv::Mat cv_img = ReadImageToCVMat(root_folder + lines_[lines_id_].first,
        new_height, new_width, is_color);
    CHECK(cv_img.data) << "Could not load " << lines_[lines_id_].first;
    read_time += timer.MicroSeconds();
    timer.Start();
    // Apply transformations (mirror, crop...) to the image
    int offset = this->prefetch_data_.offset(item_id);
    this->transformed_data_.set_cpu_data(prefetch_data + offset);
    this->data_transformer_->Transform(cv_img, &(this->transformed_data_));
    trans_time += timer.MicroSeconds();

    for (int i = 0; i < label_dim; ++i) {
      prefetch_label[item_id * label_dim + i] = lines_[lines_id_].second[i];
    }
    // go to the next iter
    lines_id_++;
    if (lines_id_ >= lines_size) {
      // We have reached the end. Restart from the first.
      DLOG(INFO) << "Restarting data prefetching from start.";
      lines_id_ = 0;
      if (this->layer_param_.image_data_param().shuffle()) {
        ShuffleImages();
      }
    }
  }
  batch_timer.Stop();
  DLOG(INFO) << "Prefetch batch: " << batch_timer.MilliSeconds() << " ms.";
  DLOG(INFO) << "     Read time: " << read_time / 1000 << " ms.";
  DLOG(INFO) << "Transform time: " << trans_time / 1000 << " ms.";
}
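This variant stores label_dim labels per image in lines_[i].second, which implies a list file whose lines hold an image path followed by label_dim integers. A minimal sketch of parsing such a file; the format and helper are assumptions based on how the labels are indexed above.

#include <fstream>
#include <string>
#include <utility>
#include <vector>

// Parse "path l0 l1 ... l(label_dim-1)" lines into (path, labels) pairs.
std::vector<std::pair<std::string, std::vector<int> > >
ReadMultiLabelList(const std::string& source, int label_dim) {
  std::vector<std::pair<std::string, std::vector<int> > > lines;
  std::ifstream infile(source.c_str());
  std::string path;
  while (infile >> path) {
    std::vector<int> labels(label_dim);
    for (int i = 0; i < label_dim; ++i) {
      infile >> labels[i];
    }
    lines.push_back(std::make_pair(path, labels));
  }
  return lines;
}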